//===---------------------------------------------------------------------===//
-We should make the following changes to clean up MachineInstr:
+With the recent changes that made the implicit def/use sets of MachineInstrs
+explicit, we should change the target descriptions for 'call' instructions so
+that the .td files don't list all the call-clobbered registers as implicit
+defs. Instead, these should be added by the code generator (e.g. on the dag;
+see the sketch below).
-1. Add an Opcode field to TargetInstrDescriptor, so you can tell the opcode of
- an instruction with just a TargetInstrDescriptor*.
-2. Remove the Opcode field from MachineInstr, replacing it with a
- TargetInstrDescriptor*.
-3. Getting information about a machine instr then becomes:
- MI->getInfo()->isTwoAddress()
- instead of:
- const TargetInstrInfo &TII = ...
- TII.isTwoAddrInstr(MI->getOpcode())
+This has a number of benefits:
-//===---------------------------------------------------------------------===//
-
-FreeBench/mason contains code like this:
-
-static p_type m0u(p_type p) {
- int m[]={0, 8, 1, 2, 16, 5, 13, 7, 14, 9, 3, 4, 11, 12, 15, 10, 17, 6};
- p_type pu;
- pu.a = m[p.a];
- pu.b = m[p.b];
- pu.c = m[p.c];
- return pu;
-}
-
-We currently compile this into a memcpy from a static array into 'm', then
-a bunch of loads from m. It would be better to avoid the memcpy and just do
-loads from the static array.
+1. PPC 32/64 and X86 32/64 can avoid having multiple copies of call
+   instructions just to carry different implicit-def sets.
+2. Targets with multiple calling conventions (e.g. x86) whose conventions
+   have different clobber sets don't need extra copies of call instructions.
+3. 'Interprocedural register allocation' can be done to reduce the clobber sets
+ of calls.
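+
+A rough sketch of what "added by the code generator" could look like, written
+against a MachineInstrBuilder-style API purely for illustration, not as the
+definitive design: clobbersForCC is a hypothetical helper returning the
+clobber set implied by the calling convention, and MBB, InsertPt, DL, TII,
+Callee and CC are assumed to be in scope.
+
+// Hypothetical sketch: the code generator, not the .td file, attaches the
+// call-clobbered registers as implicit defs on the call instruction.
+MachineInstrBuilder MIB =
+    BuildMI(MBB, InsertPt, DL, TII.get(X86::CALLpcrel32))
+        .addGlobalAddress(Callee);
+for (unsigned Reg : clobbersForCC(CC))        // hypothetical helper
+  MIB.addReg(Reg, RegState::ImplicitDefine);  // clobbered by this call
+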
//===---------------------------------------------------------------------===//
return bar(z, n) + bar(2*z, 2*n);
}
+Reassociate should handle the example in GCC PR16157.
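+
+A reconstruction of the shape of the PR's testcase (not a verbatim quote):
+chained sums that share prefixes, which reassociation should expose so that
+the whole function reduces to four additions.
+
+extern int a0, a1, a2, a3, a4;
+extern int b0, b1, b2, b3, b4;
+
+void f(void) {
+  /* After reassociation: b0 = a0; b1 = b0 + a1; b2 = b1 + a2; ... */
+  b4 = a4 + a3 + a2 + a1 + a0;
+  b3 = a3 + a2 + a1 + a0;
+  b2 = a2 + a1 + a0;
+  b1 = a1 + a0;
+  b0 = a0;
+}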
+
//===---------------------------------------------------------------------===//
These two functions should generate the same code on big-endian systems:
//===---------------------------------------------------------------------===//
-This code:
-int rot(unsigned char b) { int a = ((b>>1) ^ (b<<7)) & 0xff; return a; }
-
-Can be improved in two ways:
-
-1. The instcombiner should eliminate the type conversions.
-2. The X86 backend should turn this into a rotate by one bit.
-
-//===---------------------------------------------------------------------===//
-
-Add LSR exit value substitution. It'll probably be a win for Ackermann, etc.
-
-//===---------------------------------------------------------------------===//
-
It would be nice to revert this patch:
http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20060213/031986.html
//===---------------------------------------------------------------------===//
-For packed types, TargetData.cpp::getTypeInfo() returns alignment that is equal
+For vector types, TargetData.cpp::getTypeInfo() returns alignment that is equal
to the type size. It works but can be overly conservative as the alignment of
-specific packed types are target dependent.
+specific vector types is target dependent.
//===---------------------------------------------------------------------===//
//===---------------------------------------------------------------------===//
-We should constant fold packed type casts at the LLVM level, regardless of the
-cast. Currently we cannot fold some casts because we don't have TargetData
-information in the constant folder, so we don't know the endianness of the
-target!
-
-//===---------------------------------------------------------------------===//
-
Add support for conditional increments, and other related patterns. Instead
of:
-Scalar Repl cannot currently promote this testcase to 'ret long cst':
+Scalar Repl cannot currently promote this testcase to 'ret i64 cst':
- %struct.X = type { int, int }
+ %struct.X = type { i32, i32 }
%struct.Y = type { %struct.X }
-ulong %bar() {
- %retval = alloca %struct.Y, align 8 ; <%struct.Y*> [#uses=3]
- %tmp12 = getelementptr %struct.Y* %retval, int 0, uint 0, uint 0
- store int 0, int* %tmp12
- %tmp15 = getelementptr %struct.Y* %retval, int 0, uint 0, uint 1
- store int 1, int* %tmp15
- %retval = cast %struct.Y* %retval to ulong*
- %retval = load ulong* %retval ; <ulong> [#uses=1]
- ret ulong %retval
+
+define i64 @bar() {
+ %retval = alloca %struct.Y, align 8
+ %tmp12 = getelementptr %struct.Y* %retval, i32 0, i32 0, i32 0
+ store i32 0, i32* %tmp12
+ %tmp15 = getelementptr %struct.Y* %retval, i32 0, i32 0, i32 1
+ store i32 1, i32* %tmp15
+ %retval.upgrd.1 = bitcast %struct.Y* %retval to i64*
+ %retval.upgrd.2 = load i64* %retval.upgrd.1
+ ret i64 %retval.upgrd.2
}
it should be extended to do so.
//===---------------------------------------------------------------------===//
+-scalarrepl should promote this to be a vector scalar.
+
+ %struct..0anon = type { <4 x float> }
+
+define void @test1(<4 x float> %V, float* %P) {
+ %u = alloca %struct..0anon, align 16
+ %tmp = getelementptr %struct..0anon* %u, i32 0, i32 0
+ store <4 x float> %V, <4 x float>* %tmp
+ %tmp1 = bitcast %struct..0anon* %u to [4 x float]*
+ %tmp.upgrd.1 = getelementptr [4 x float]* %tmp1, i32 0, i32 1
+ %tmp.upgrd.2 = load float* %tmp.upgrd.1
+ %tmp3 = mul float %tmp.upgrd.2, 2.000000e+00
+ store float %tmp3, float* %P
+ ret void
+}
+
+//===---------------------------------------------------------------------===//
+
Turn this into a single byte store with no load (the other 3 bytes are
unmodified):
return v;
}
+Nor is this (yes, it really is bswap):
+
+unsigned long reverse(unsigned v) {
+ unsigned t;
+ t = v ^ ((v << 16) | (v >> 16));
+ t &= ~0xff0000;
+ v = (v << 24) | (v >> 8);
+ return v ^ (t >> 8);
+}
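+
+A quick byte-level check of the claim, writing the bytes of v as [a,b,c,d]
+from most to least significant:
+
+  (v << 16) | (v >> 16)    = [c,   d,   a,   b  ]
+  t = v ^ [c,d,a,b]        = [a^c, b^d, a^c, b^d]
+  t &= ~0xff0000           = [a^c, 0,   a^c, b^d]
+  v = (v << 24) | (v >> 8) = [d,   a,   b,   c  ]
+  t >> 8                   = [0,   a^c, 0,   a^c]
+  v ^ (t >> 8)             = [d,   c,   b,   a  ]   i.e. bswap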
+
//===---------------------------------------------------------------------===//
These should turn into single 16-bit (unaligned?) loads on little/big endian
//===---------------------------------------------------------------------===//
--scalarrepl should promote this to be a vector scalar.
-
- %struct..0anon = type { <4 x float> }
-implementation ; Functions:
-void %test1(<4 x float> %V, float* %P) {
-entry:
- %u = alloca %struct..0anon, align 16 ; <%struct..0anon*> [#uses=2]
- %tmp = getelementptr %struct..0anon* %u, int 0, uint 0 ; <<4 x float>*> [#uses=1]
- store <4 x float> %V, <4 x float>* %tmp
- %tmp1 = cast %struct..0anon* %u to [4 x float]* ; <[4 x float]*> [#uses=1]
- %tmp = getelementptr [4 x float]* %tmp1, int 0, int 1 ; <float*> [#uses=1]
- %tmp = load float* %tmp ; <float> [#uses=1]
- %tmp3 = mul float %tmp, 2.000000e+00 ; <float> [#uses=1]
- store float %tmp3, float* %P
- ret void
-}
-
-//===---------------------------------------------------------------------===//
-
-instcombine should handle this transform:
- setcc (sdiv X / C1 ), C2
+ icmp pred (sdiv X / C1 ), C2
when X, C1, and C2 are unsigned. Similarly for udiv and signed operands.
Currently InstCombine avoids this transform but will do it when the signs of
http://gcc.gnu.org/ml/gcc-patches/2006-10/msg01519.html
//===---------------------------------------------------------------------===//
+
+viterbi speeds up *significantly* if the various "history"-related copy loops
+are turned into memcpy calls at the source level. We need a "loops to memcpy"
+pass.
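+
+A sketch of the pattern involved (a hand-written illustration; these names
+are made up, not the benchmark's):
+
+#include <string.h>
+
+/* The kind of loop a "loops to memcpy" pass should recognize... */
+void copy_history(int *dst, const int *src, int n) {
+  int i;
+  for (i = 0; i < n; i++)
+    dst[i] = src[i];
+}
+
+/* ...and the call it should be turned into. */
+void copy_history_opt(int *dst, const int *src, int n) {
+  memcpy(dst, src, n * sizeof(int));
+}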
+
+//===---------------------------------------------------------------------===//
+
+Consider:
+
+typedef unsigned U32;
+typedef unsigned long long U64;
+int test (U32 *inst, U64 *regs) {
+ U64 effective_addr2;
+ U32 temp = *inst;
+ int r1 = (temp >> 20) & 0xf;
+ int b2 = (temp >> 16) & 0xf;
+ effective_addr2 = temp & 0xfff;
+ if (b2) effective_addr2 += regs[b2];
+ b2 = (temp >> 12) & 0xf;
+ if (b2) effective_addr2 += regs[b2];
+ effective_addr2 &= regs[4];
+ if ((effective_addr2 & 3) == 0)
+ return 1;
+ return 0;
+}
+
+Note that only the low 2 bits of effective_addr2 are used. On 32-bit systems,
+we don't eliminate the computation of the top half of effective_addr2 because
+we don't have whole-function selection dags. On x86, this means the function
+uses one more register when effective_addr2 is declared as U64 than when it
+is declared as U32.
+
+//===---------------------------------------------------------------------===//
+
+Promoting an i32 bswap can use an i64 bswap plus a shift right. This is
+useful on targets with 64-bit registers and a bswap instruction, like
+Itanium.
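+
+A minimal illustration of the equivalence, using the GCC/Clang bswap builtins
+(the 32-bit argument is implicitly zero-extended to 64 bits):
+
+unsigned int bswap32_via_64(unsigned int x) {
+  /* bswap64 places x's bytes, reversed, in the top 32 bits of the
+     result; shifting right by 32 yields exactly bswap32(x). */
+  return (unsigned int)(__builtin_bswap64(x) >> 32);
+}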
+
+//===---------------------------------------------------------------------===//
+
+LSR should know what GPR types a target has. This code:
+
+volatile short X, Y; // globals
+
+void foo(int N) {
+ int i;
+ for (i = 0; i < N; i++) { X = i; Y = i*4; }
+}
+
+produces two identical IVs (after promotion) on PPC/ARM:
+
+LBB1_1: @bb.preheader
+ mov r3, #0
+ mov r2, r3
+ mov r1, r3
+LBB1_2: @bb
+ ldr r12, LCPI1_0
+ ldr r12, [r12]
+ strh r2, [r12]
+ ldr r12, LCPI1_1
+ ldr r12, [r12]
+ strh r3, [r12]
+ add r1, r1, #1 <- [0,+,1]
+ add r3, r3, #4
+ add r2, r2, #1 <- [0,+,1]
+ cmp r1, r0
+ bne LBB1_2 @bb
+
+//===---------------------------------------------------------------------===//
+
+Tail call elim should be more aggressive, checking to see if the call is
+followed by an uncond branch to an exit block.
+
+; This testcase is due to tail-duplication not wanting to copy the return
+; instruction into the terminating blocks because there was other code
+; optimized out of the function after the taildup happened.
+;RUN: llvm-upgrade < %s | llvm-as | opt -tailcallelim | llvm-dis | not grep call
+
+int %t4(int %a) {
+entry:
+ %tmp.1 = and int %a, 1
+ %tmp.2 = cast int %tmp.1 to bool
+ br bool %tmp.2, label %then.0, label %else.0
+
+then.0:
+ %tmp.5 = add int %a, -1
+ %tmp.3 = call int %t4( int %tmp.5 )
+ br label %return
+
+else.0:
+ %tmp.7 = setne int %a, 0
+ br bool %tmp.7, label %then.1, label %return
+
+then.1:
+ %tmp.11 = add int %a, -2
+ %tmp.9 = call int %t4( int %tmp.11 )
+ br label %return
+
+return:
+ %result.0 = phi int [ 0, %else.0 ], [ %tmp.3, %then.0 ],
+ [ %tmp.9, %then.1 ]
+ ret int %result.0
+}
+
+//===---------------------------------------------------------------------===//
+
+Tail recursion elimination does not transform this function because it
+returns n, which fails the isDynamicConstant check in the accumulator
+recursion handling.
+
+long long fib(const long long n) {
+ switch(n) {
+ case 0:
+ case 1:
+ return n;
+ default:
+ return fib(n-1) + fib(n-2);
+ }
+}
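+
+For reference, a sketch of the accumulator transform in a case tailcallelim
+does handle (fact_iter is just an illustrative name for the result):
+
+long long fact(long long n) { return n <= 1 ? 1 : n * fact(n - 1); }
+
+/* becomes, roughly: */
+long long fact_iter(long long n) {
+  long long acc = 1;                  /* accumulates the pending multiplies */
+  while (n > 1) { acc *= n; --n; }
+  return acc;
+}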
+
+//===---------------------------------------------------------------------===//
+
+Argument promotion should promote arguments for recursive functions, like
+this:
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -argpromotion | llvm-dis | grep x.val
+
+implementation ; Functions:
+
+internal int %foo(int* %x) {
+entry:
+ %tmp = load int* %x
+ %tmp.foo = call int %foo(int *%x)
+ ret int %tmp.foo
+}
+
+int %bar(int* %x) {
+entry:
+ %tmp3 = call int %foo( int* %x) ; <int>[#uses=1]
+ ret int %tmp3
+}
+
+//===---------------------------------------------------------------------===//
+
+"basicaa" should know how to look through "or" instructions that act like add
+instructions. For example, in this code the x*4+1 is turned into x*4 | 1, and
+basicaa can't analyze the array subscript, leading to duplicated loads in the
+generated code:
+
+void test(int X, int Y, int a[]) {
+  int i;
+ for (i=2; i<1000; i+=4) {
+ a[i+0] = a[i-1+0]*a[i-2+0];
+ a[i+1] = a[i-1+1]*a[i-2+1];
+ a[i+2] = a[i-1+2]*a[i-2+2];
+ a[i+3] = a[i-1+3]*a[i-2+3];
+ }
+}
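+
+The key fact here: when the two operands of an "or" have no bits in common,
+the or is exactly an add. x*4 has its low two bits clear, so:
+
+unsigned addr_or(unsigned x)  { return (x * 4) | 1; }  /* same value...    */
+unsigned addr_add(unsigned x) { return x * 4 + 1; }    /* ...for every x   */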
+
+//===---------------------------------------------------------------------===//
+
+We should investigate an instruction sinking pass. Consider this silly
+example in PIC mode:
+
+#include <assert.h>
+void foo(int x) {
+ assert(x);
+ //...
+}
+
+we compile this to:
+_foo:
+ subl $28, %esp
+ call "L1$pb"
+"L1$pb":
+ popl %eax
+ cmpl $0, 32(%esp)
+ je LBB1_2 # cond_true
+LBB1_1: # return
+ # ...
+ addl $28, %esp
+ ret
+LBB1_2: # cond_true
+...
+
+The PIC base computation (call+popl) is only used on one path through the
+code, but is currently always computed in the entry block. It would be
+better to sink the PIC base computation down into the block containing the
+assertion, as that is the only block that uses it. This pattern shows up in
+a lot of code with early outs.
+
+Another example is loads of arguments, which are usually emitted into the
+entry block on targets like x86. If an argument is not used on all paths
+through the function, its load should be sunk into the paths that do use it.
+
+Whole-function isel would also handle this case.
+
+//===---------------------------------------------------------------------===//