... which should only be one imul instruction.
+This can be done with a custom expander, but it would be nice to move this to
+generic code.
+
//===---------------------------------------------------------------------===//
This should be one DIV/IDIV instruction, not a libcall:
Another useful one would be ~0ULL >> X and ~0ULL << X.
+One better solution for 1LL << x is:
+ xorl %eax, %eax
+ xorl %edx, %edx
+ testb $32, %cl
+ sete %al
+ setne %dl
+ sall %cl, %eax
+ sall %cl, %edx
+
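+(This works because sall only uses the low five bits of %cl: the sete/setne
+pair puts the 1 in %eax when bit 5 of the shift count is clear and in %edx when
+it is set, so %edx:%eax ends up holding 1LL << x for any count in 0..63.)
+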
+But that requires good 8-bit subreg support.
+
+64-bit shifts (in general) expand to really bad code. Instead of using
+cmovs, we should expand to a conditional branch like GCC produces.
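+
+As a minimal sketch (illustrative C only; the helper name is made up and this
+is not necessarily the exact sequence GCC emits), the branch-based lowering of
+a 64-bit left shift looks like:
+
+/* Hypothetical illustration of the branchy expansion. */
+unsigned long long shl64(unsigned long long x, unsigned n) {
+  unsigned lo = (unsigned)x, hi = (unsigned)(x >> 32);
+  if (n & 32) {                /* count >= 32: low word becomes the high word */
+    hi = lo << (n & 31);
+    lo = 0;
+  } else {                     /* count < 32: ordinary double-word shift */
+    hi = (hi << n) | (n ? lo >> (32 - n) : 0);
+    lo <<= n;
+  }
+  return ((unsigned long long)hi << 32) | lo;
+}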
+
//===---------------------------------------------------------------------===//
Compile this:
//===---------------------------------------------------------------------===//
-Model X86 EFLAGS as a real register to avoid redudant cmp / test. e.g.
-
- cmpl $1, %eax
- setg %al
- testb %al, %al # unnecessary
- jne .BB7
-
-//===---------------------------------------------------------------------===//
-
Count leading zeros and count trailing zeros:
int clz(int X) { return __builtin_clz(X); }
should be made smart enough to canonicalize the load into the RHS of a compare
when it can invert the result of the compare for free.
+//===---------------------------------------------------------------------===//
+
How about intrinsics? An example is:
*res = _mm_mulhi_epu16(*A, _mm_mul_epu32(*B, *C));
//===---------------------------------------------------------------------===//
-The DAG Isel doesn't fold the loads into the adds in this testcase. The
-pattern selector does. This is because the chain value of the load gets
-selected first, and the loads aren't checking to see if they are only used by
-and add.
-
-.ll:
-
-int %test(int* %x, int* %y, int* %z) {
- %X = load int* %x
- %Y = load int* %y
- %Z = load int* %z
- %a = add int %X, %Y
- %b = add int %a, %Z
- ret int %b
-}
-
-dag isel:
-
-_test:
- movl 4(%esp), %eax
- movl (%eax), %eax
- movl 8(%esp), %ecx
- movl (%ecx), %ecx
- addl %ecx, %eax
- movl 12(%esp), %ecx
- movl (%ecx), %ecx
- addl %ecx, %eax
- ret
-
-pattern isel:
-
-_test:
- movl 12(%esp), %ecx
- movl 4(%esp), %edx
- movl 8(%esp), %eax
- movl (%eax), %eax
- addl (%edx), %eax
- addl (%ecx), %eax
- ret
-
-This is bad for register pressure, though the dag isel is producing a
-better schedule. :)
-
-//===---------------------------------------------------------------------===//
-
In many cases, LLVM generates code like this:
_test:
movl 8(%esp), %ebx
- xor %eax, %eax
+ xor %eax, %eax
cmpl %ebx, 4(%esp)
setl %al
ret
//===---------------------------------------------------------------------===//
-We should generate 'test' instead of 'cmp' in various cases, e.g.:
-
-bool %test(int %X) {
- %Y = shl int %X, ubyte 1
- %C = seteq int %Y, 0
- ret bool %C
-}
-bool %test(int %X) {
- %Y = and int %X, 8
- %C = seteq int %Y, 0
- ret bool %C
-}
-
-This may just be a matter of using 'test' to write bigger patterns for X86cmp.
-
-An important case is comparison against zero:
-
-if (X == 0) ...
-
-instead of:
-
- cmpl $0, %eax
- je LBB4_2 #cond_next
-
-use:
- test %eax, %eax
- jz LBB4_2
-
-which is smaller.
-
-//===---------------------------------------------------------------------===//
-
We should generate bts/btr/etc instructions on targets where they are cheap or
when codesize is important. e.g., for:
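
A representative case (a guess at the kind of code meant here, since the
original example is not shown) would be functions like:

/* Hypothetical examples; names are illustrative only. */
void setbit(int *target, int bit) {
  *target |= (1 << bit);       /* wants a single bts */
}
void clearbit(int *target, int bit) {
  *target &= ~(1 << bit);      /* wants a single btr */
}
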
//===---------------------------------------------------------------------===//
-Investigate whether it is better to codegen the following
-
- %tmp.1 = mul int %x, 9
-to
-
- movl 4(%esp), %eax
- leal (%eax,%eax,8), %eax
-
-as opposed to what llc is currently generating:
-
- imull $9, 4(%esp), %eax
-
-Currently the load folding imull has a higher complexity than the LEA32 pattern.
-
-//===---------------------------------------------------------------------===//
-
We are currently lowering large (1MB+) memmove/memcpy to rep/stosl and rep/movsl.
We should leave these as libcalls for everything over a much lower threshold,
since libc is hand tuned for medium and large mem ops (avoiding RFO for large
stores, TLB preloading, etc.).
//===---------------------------------------------------------------------===//
-This code generates ugly code, probably due to costs being off or something:
-
-void %test(float* %P, <4 x float>* %P2 ) {
- %xFloat0.688 = load float* %P
- %loadVector37.712 = load <4 x float>* %P2
- %inFloat3.713 = insertelement <4 x float> %loadVector37.712, float 0.000000e+00, uint 3
- store <4 x float> %inFloat3.713, <4 x float>* %P2
- ret void
-}
-
-Generates:
-
-_test:
- pxor %xmm0, %xmm0
- movd %xmm0, %eax ;; EAX = 0!
- movl 8(%esp), %ecx
- movaps (%ecx), %xmm0
- pinsrw $6, %eax, %xmm0
- shrl $16, %eax ;; EAX = 0 again!
- pinsrw $7, %eax, %xmm0
- movaps %xmm0, (%ecx)
- ret
-
-It would be better to generate:
-
-_test:
- movl 8(%esp), %ecx
- movaps (%ecx), %xmm0
- xor %eax, %eax
- pinsrw $6, %eax, %xmm0
- pinsrw $7, %eax, %xmm0
- movaps %xmm0, (%ecx)
- ret
-
-or use pxor (to make a zero vector) and shuffle (to insert it).
-
-//===---------------------------------------------------------------------===//
-
Bad codegen:
char foo(int x) { return x; }
//===---------------------------------------------------------------------===//
-Some ideas for instruction selection code simplification: 1. A pre-pass to
-determine which chain producing node can or cannot be folded. The generated
-isel code would then use the information. 2. The same pre-pass can force
-ordering of TokenFactor operands to allow load / store folding. 3. During isel,
-instead of recursively going up the chain operand chain, mark the chain operand
-as available and put it in some work list. Select other nodes in the normal
-manner. The chain operands are selected after all other nodes are selected. Uses
-of chain nodes are modified after instruction selection is completed.
-
-//===---------------------------------------------------------------------===//
-
Another instruction selector deficiency:
void %bar() {
//===---------------------------------------------------------------------===//
-Consider:
-int foo(int *a, int t) {
-int x;
-for (x=0; x<40; ++x)
- t = t + a[x] + x;
-return t;
-}
-
-We generate:
-LBB1_1: #cond_true
- movl %ecx, %esi
- movl (%edx,%eax,4), %edi
- movl %esi, %ecx
- addl %edi, %ecx
- addl %eax, %ecx
- incl %eax
- cmpl $40, %eax
- jne LBB1_1 #cond_true
+int %foo(int* %a, int %t) {
+entry:
+ br label %cond_true
-GCC generates:
+cond_true: ; preds = %cond_true, %entry
+ %x.0.0 = phi int [ 0, %entry ], [ %tmp9, %cond_true ]
+ %t_addr.0.0 = phi int [ %t, %entry ], [ %tmp7, %cond_true ]
+ %tmp2 = getelementptr int* %a, int %x.0.0
+ %tmp3 = load int* %tmp2 ; <int> [#uses=1]
+ %tmp5 = add int %t_addr.0.0, %x.0.0 ; <int> [#uses=1]
+ %tmp7 = add int %tmp5, %tmp3 ; <int> [#uses=2]
+ %tmp9 = add int %x.0.0, 1 ; <int> [#uses=2]
+ %tmp = setgt int %tmp9, 39 ; <bool> [#uses=1]
+ br bool %tmp, label %bb12, label %cond_true
-L2:
- addl (%ecx,%edx,4), %eax
- addl %edx, %eax
- addl $1, %edx
- cmpl $40, %edx
- jne L2
-
-Smells like a register coallescing/reassociation issue.
-
-//===---------------------------------------------------------------------===//
+bb12: ; preds = %cond_true
+ ret int %tmp7
+}
-Use cpuid to auto-detect CPU features such as SSE, SSE2, and SSE3.
+is pessimized by -loop-reduce and -indvars
//===---------------------------------------------------------------------===//
0000002a flds (%esp,1)
0000002d addl $0x04,%esp
00000030 ret
+
+//===---------------------------------------------------------------------===//
+
+When using the fastcc ABI, align the stack slots of arguments of type double on
+an 8-byte boundary to improve performance.
+
+//===---------------------------------------------------------------------===//
+
+Codegen:
+
+int f(int a, int b) {
+ if (a == 4 || a == 6)
+ b++;
+ return b;
+}
+
+
+as:
+
+or eax, 2
+cmp eax, 6
+jz label
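+
+(i.e. check (a | 2) == 6, which holds exactly when a is 4 or 6.)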
+
+//===---------------------------------------------------------------------===//
+
+GCC's ix86_expand_int_movcc function (in i386.c) has a ton of interesting
+simplifications for integer "x cmp y ? a : b". For example, instead of:
+
+int G;
+void f(int X, int Y) {
+ G = X < 0 ? 14 : 13;
+}
+
+compiling to:
+
+_f:
+ movl $14, %eax
+ movl $13, %ecx
+ movl 4(%esp), %edx
+ testl %edx, %edx
+ cmovl %eax, %ecx
+ movl %ecx, _G
+ ret
+
+it could be:
+_f:
+ movl 4(%esp), %eax
+ sarl $31, %eax
+ notl %eax
+ addl $14, %eax
+ movl %eax, _G
+ ret
+
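+(sarl $31 yields -1 for negative X and 0 otherwise; notl flips that to 0 or -1,
+and addl $14 then gives 14 or 13, computing the select with no branch or cmov.)
+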
+etc.
+
+//===---------------------------------------------------------------------===//
+
+Currently we don't have elimination of redundant stack manipulations. Consider
+the code:
+
+int %main() {
+entry:
+ call fastcc void %test1( )
+ call fastcc void %test2( sbyte* cast (void ()* %test1 to sbyte*) )
+ ret int 0
+}
+
+declare fastcc void %test1()
+
+declare fastcc void %test2(sbyte*)
+
+
+This currently compiles to:
+
+ subl $16, %esp
+ call _test1
+ addl $12, %esp
+ subl $16, %esp
+ movl $_test1, (%esp)
+ call _test2
+ addl $12, %esp
+
+The add/sub pair between the two calls is really unneeded here.
+
+//===---------------------------------------------------------------------===//
+
+We generate really bad code in some cases due to lowering SETCC/SELECT at
+legalize time, which prevents the post-legalize dag combine pass from
+understanding the code. As a silly example, this prevents us from folding
+stuff like this:
+
+bool %test(ulong %x) {
+ %tmp = setlt ulong %x, 4294967296
+ ret bool %tmp
+}
+
+into a simple compare of the high 32-bit half of %x against zero (x.h == 0).
+
+//===---------------------------------------------------------------------===//
+
+We currently compile sign_extend_inreg into two shifts:
+
+long foo(long X) {
+ return (long)(signed char)X;
+}
+
+becomes:
+
+_foo:
+ movl 4(%esp), %eax
+ shll $24, %eax
+ sarl $24, %eax
+ ret
+
+This could be:
+
+_foo:
+ movsbl 4(%esp),%eax
+ ret
+
+//===---------------------------------------------------------------------===//
+
+Consider the expansion of:
+
+uint %test3(uint %X) {
+ %tmp1 = rem uint %X, 255
+ ret uint %tmp1
+}
+
+Currently it compiles to:
+
+...
+ movl $2155905153, %ecx
+ movl 8(%esp), %esi
+ movl %esi, %eax
+ mull %ecx
+...
+
+This could be "reassociated" into:
+
+ movl $2155905153, %eax
+ movl 8(%esp), %ecx
+ mull %ecx
+
+to avoid the copy. In fact, the existing two-address stuff would do this
+except that mul isn't a commutative 2-addr instruction. I guess this has
+to be done at isel time, based on the #uses of the mul?
+