//===---------------------------------------------------------------------===//
// Random ideas for the X86 backend.
//===---------------------------------------------------------------------===//
Add MUL2U and MUL2S nodes to represent a multiply that returns both the
Hi and Lo parts (a combination of MUL and MULH[SU] in one node). Add this to
X86, and make the dag combiner produce it when needed. This will eliminate one
imul from the code generated for:

long long test(long long X, long long Y) { return X*Y; }

by using the EAX result from the mul. We should add a similar node for
DIVREM.

Another case is:

long long test(int X, int Y) { return (long long)X*Y; }

... which should only be one imul instruction.
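A hedged sketch of the arithmetic in the first case: with X = 2^32*Xh + Xl
and Y = 2^32*Yh + Yl, the low 64 bits of the product are

  lo64(X*Y) = Xl*Yl + 2^32 * lo32(Xl*Yh + Xh*Yl)   (mod 2^64)

The mull of Xl and Yl already leaves the high half of Xl*Yl in EDX, so a
combined node returning both halves avoids recomputing that high half with a
separate multiply. The second case is even simpler: a single one-operand
imul produces the full 64-bit product of two 32-bit values in EDX:EAX.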
//===---------------------------------------------------------------------===//
This should be one DIV/IDIV instruction, not a libcall:

unsigned test(unsigned long long X, unsigned Y) {
  return X/Y;
}

This can be done trivially with a custom legalizer. What about overflow
though? http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224
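A hedged worked example of the overflow hazard: take X = 1ULL << 32 and
Y = 1. The C expression computes 2^32 and truncates it to the unsigned
return type (giving 0), but a single DIV of EDX:EAX by Y would raise #DE
because the quotient does not fit in 32 bits. The one-instruction form is
therefore only safe when the high word of X is known to be less than Y.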
//===---------------------------------------------------------------------===//
Improvements to the multiply -> shift/add algorithm:
http://gcc.gnu.org/ml/gcc-patches/2004-08/msg01590.html
//===---------------------------------------------------------------------===//
Improve code like this (which occurs fairly frequently, e.g. in LLVM itself):

long long foo(int x) { return 1LL << x; }

http://gcc.gnu.org/ml/gcc-patches/2004-09/msg01109.html
http://gcc.gnu.org/ml/gcc-patches/2004-09/msg01128.html
http://gcc.gnu.org/ml/gcc-patches/2004-09/msg01136.html

Another useful pair would be ~0ULL >> X and ~0ULL << X.
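For reference, a trivial sketch of those two functions (hypothetical names):

unsigned long long all_ones_shr(int X) { return ~0ULL >> X; }
unsigned long long all_ones_shl(int X) { return ~0ULL << X; }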
//===---------------------------------------------------------------------===//
_Bool f(_Bool a) { return a != 1; }
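A hedged note on what this should become: a _Bool is always 0 or 1, so the
comparison is a single bit flip rather than a compare (hypothetical name f2):

_Bool f2(_Bool a) { return a ^ 1; }  /* just an xor of the low bit */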
//===---------------------------------------------------------------------===//
Some instruction selection ideas:

1. A dynamic programming based approach, for when compile time is not an
   issue.
2. Code duplication (addressing mode) during isel.
3. Other ideas from "Register-Sensitive Selection, Duplication, and
   Sequencing of Instructions".
4. Scheduling for reduced register pressure. E.g. "Minimum Register
   Instruction Sequence Problem: Revisiting Optimal Code Generation for DAGs"
   and other related papers.
   http://citeseer.ist.psu.edu/govindarajan01minimum.html
//===---------------------------------------------------------------------===//
Should we promote i16 to i32 to avoid partial register update stalls?
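A hedged illustration of the hazard (hypothetical testcase): a 16-bit
operation writes only the low half of a 32-bit register, so a later
full-width read depends on the stale upper bits and can stall on some
processors:

short add16(short a, short b) {
  return a + b;  /* if emitted as a 16-bit add writing %ax, a following
                    read of %eax pays the partial-register stall */
}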
//===---------------------------------------------------------------------===//
Leave any_extend as a pseudo instruction and hint to the register
allocator. Delay codegen until post-register allocation.
//===---------------------------------------------------------------------===//
Model X86 EFLAGS as a real register to avoid redundant cmp / test. e.g.

        ...
        testb %al, %al          # unnecessary
        ...
//===---------------------------------------------------------------------===//
Count leading zeros and count trailing zeros:

int clz(int X) { return __builtin_clz(X); }
int ctz(int X) { return __builtin_ctz(X); }

$ gcc t.c -S -o - -O3 -fomit-frame-pointer -masm=intel
clz:
        bsr %eax, DWORD PTR [%esp+4]
        ret
ctz:
        bsf %eax, DWORD PTR [%esp+4]
        ret

however, check that these are defined for 0 and 32. Our intrinsics are,
GCC's aren't.
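If defined behavior at zero is required, a hedged sketch of the guard a
lowering would need around bsf/bsr (whose result is undefined for a zero
source; hypothetical wrappers):

int ctz32(unsigned X) { return X ? __builtin_ctz(X) : 32; }
int clz32(unsigned X) { return X ? __builtin_clz(X) : 32; }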
//===---------------------------------------------------------------------===//
Use push/pop instructions in prolog/epilog sequences instead of stores off
ESP (certain code size win, perf win on some [which?] processors).
Also, it appears icc uses push for parameter passing. Need to investigate.
//===---------------------------------------------------------------------===//
Only use inc/neg/not instructions on processors where they are faster than
add/sub/xor. They are slower on the P4 due to updating only part of the
flags.
//===---------------------------------------------------------------------===//
The instruction selector sometimes misses folding a load into a compare. The
pattern is written as (cmp reg, (load p)). Because the compare isn't
commutative, it is not matched when the load appears on the other side. The
dag combiner should be made smart enough to canonicalize the load into the
RHS of a compare when it can invert the result of the compare for free.
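A hedged illustration of the two shapes involved (hypothetical functions):

int a(int *p, int x) { return *p < x; }  /* (setlt (load p), x): load on LHS */
int b(int *p, int x) { return x > *p; }  /* same predicate with the operands
                                            swapped; the load sits on the RHS
                                            where the pattern can fold it */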
How about intrinsics? An example is:

  *res = _mm_mulhi_epu16(*A, _mm_mul_epu32(*B, *C));

where the load should likewise be folded into the instruction, e.g.:

        pmuludq (%eax), %xmm0

The transformation probably requires an X86-specific pass or a
target-specific DAG combiner hook.

//===---------------------------------------------------------------------===//
The DAG Isel doesn't fold the loads into the adds in this testcase. The
pattern selector does. This is because the chain value of the load gets
selected first, and the loads aren't checking to see if they are only used
by the adds they feed.

int %test(int* %x, int* %y, int* %z) {
        %X = load int* %x
        %Y = load int* %y
        %Z = load int* %z
        %a = add int %X, %Y
        %b = add int %a, %Z
        ret int %b
}

This is bad for register pressure, though the dag isel is producing a
better schedule.

//===---------------------------------------------------------------------===//
In many cases, LLVM generates code like this:

        ...
        setl %al
        movzbl %al, %eax
        ...

on some processors (which ones?), it is more efficient to do this:

        xorl %eax, %eax
        ...
        setl %al

i.e. pre-zero the result register and let the setcc write only the low byte,
avoiding the zero extension. Doing this correctly is tricky though, as the
xor clobbers the flags, so it must be scheduled before the compare that the
setcc reads.

//===---------------------------------------------------------------------===//
We should generate 'test' instead of 'cmp' in various cases, e.g.:

        %Y = shl int %X, ubyte 1
        %C = seteq int %Y, 0
        ...

This may just be a matter of using 'test' to write bigger patterns for
X86cmp.

An important case is comparison against zero. For instance, instead of:

        cmpl $0, %eax

we should emit:

        testl %eax, %eax

which is one byte smaller.

//===---------------------------------------------------------------------===//
We should generate bts/btr/etc instructions on targets where they are cheap
or when codesize is important. e.g., for:

void setbit(int *target, int bit) {
  *target |= (1 << bit);
}
void clearbit(int *target, int bit) {
  *target &= ~(1 << bit);
}
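For the "etc" part, the complement case maps the same way (a sketch with a
hypothetical name):

void togglebit(int *target, int bit) {
  *target ^= (1 << bit);  /* candidate for btc */
}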
//===---------------------------------------------------------------------===//
Instead of the following for memset char*, 1, 10:

        movl $16843009, 4(%edx)
        movl $16843009, (%edx)
        movw $257, 8(%edx)

It might be better to generate

        movl $16843009, %eax
        movl %eax, 4(%edx)
        movl %eax, (%edx)
        movw %ax, 8(%edx)

when we can spare a register. It reduces code size.
//===---------------------------------------------------------------------===//
Evaluate what the best way to codegen sdiv X, (2^C) is. For X/8, we
currently generate the standard bias-and-shift sequence.

GCC knows several different ways to codegen it, one of which is this:

        ...

which is probably slower, but it's interesting at least :)
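A hedged sketch of the bias-and-shift sequence in C (hypothetical function;
assumes arithmetic right shift on signed int, which X86 compilers provide):
signed division must round toward zero, so negative dividends get a bias of
7 added before the shift:

int div8(int X) {
  int bias = (X >> 31) & 7;  /* 7 when X is negative, else 0 */
  return (X + bias) >> 3;
}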
//===---------------------------------------------------------------------===//
Should generate min/max for stuff like:

void minf(float a, float b, float *X) {
  *X = a <= b ? a : b;
}

Make use of floating point min / max instructions. Perhaps introduce ISD::FMIN
and ISD::FMAX node types?

//===---------------------------------------------------------------------===//
The first BB of this code:

        ...
        %V = call bool %foo()
        br bool %V, label %T, label %F
        ...

It would be better to emit "cmp %al, 1" than an xor and a test.

//===---------------------------------------------------------------------===//
Enable X86InstrInfo::convertToThreeAddress().

//===---------------------------------------------------------------------===//
We are currently lowering large (1MB+) memmove/memcpy to rep/stosl and
rep/movsl. We should leave these as libcalls for everything over a much
lower threshold, since libc is hand tuned for medium and large mem ops
(avoiding RFO for large stores, TLB preheating, etc.)

//===---------------------------------------------------------------------===//
Optimize this into something reasonable:

  x * copysign(1.0, y) * copysign(1.0, z)
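A hedged sketch of the strength reduction presumably wanted here, written
with the float analogue for brevity (hypothetical function; the two sign
bits simply XOR together):

#include <stdint.h>
#include <string.h>

float fold(float x, float y, float z) {
  uint32_t xb, yb, zb;
  memcpy(&xb, &x, 4);
  memcpy(&yb, &y, 4);
  memcpy(&zb, &z, 4);
  xb ^= (yb ^ zb) & 0x80000000u;  /* flip sign(x) by sign(y)^sign(z) */
  memcpy(&x, &xb, 4);
  return x;
}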
//===---------------------------------------------------------------------===//
Optimize copysign(x, *y) to use an integer load from y.
//===---------------------------------------------------------------------===//
%X = weak global int 0

void %foo(int %N) {
entry:
        %N = cast int %N to uint
        %tmp.24 = setgt int %N, 0
        br bool %tmp.24, label %no_exit, label %return

no_exit:
        %indvar = phi uint [ 0, %entry ], [ %indvar.next, %no_exit ]
        %i.0.0 = cast uint %indvar to int
        volatile store int %i.0.0, int* %X
        %indvar.next = add uint %indvar, 1
        %exitcond = seteq uint %indvar.next, %N
        br bool %exitcond, label %return, label %no_exit

return:
        ret void
}
This compiles to a loop in which the load of L_X$non_lazy_ptr has not been
hoisted out:

        ...
        jl LBB_foo_4    # return
LBB_foo_1:      # no_exit.preheader
        ...
        movl L_X$non_lazy_ptr, %edx
        ...
        jne LBB_foo_2   # no_exit
LBB_foo_3:      # return.loopexit
We should hoist "movl L_X$non_lazy_ptr, %edx" out of the loop after
rematerialization is implemented. This can be accomplished with 1) a
target-dependent LICM pass or 2) making the SelectionDAG represent the
whole function.

//===---------------------------------------------------------------------===//
The following tests perform worse with LSR:

lambda, siod, optimizer-eval, ackermann, hash2, nestedloop, strcat, and Treesor.

//===---------------------------------------------------------------------===//
Teach the coalescer to coalesce vregs of different register classes, e.g.
FR32 / FR64.

//===---------------------------------------------------------------------===//
Obviously it would have been better for the first mov (or any op) to store
directly to 0(%esp) if there are no other uses.

//===---------------------------------------------------------------------===//
Adding to the list of cmp / test poor codegen issues:

int test(__m128 *A, __m128 *B) {
  if (_mm_comige_ss(*A, *B))
    ...
}

Note the setae, movzbl, cmpl, cmove sequence can be replaced with a single
cmovae. There are a number of issues. 1) We are introducing a setcc between
the result of the intrinsic call and the select. 2) The intrinsic is
expected to produce an i32 value, so an any_extend (which becomes a
zero_extend) is added.

We probably need some kind of target DAG combine hook to fix this.

//===---------------------------------------------------------------------===//
We generate significantly worse code for this than GCC:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=21150
http://gcc.gnu.org/bugzilla/attachment.cgi?id=8701

There is also one case where we do worse on PPC.

//===---------------------------------------------------------------------===//
If shorter, we should use things like:

        movzwl %ax, %eax

instead of:

        andl $65535, %eax

The former can also be used when the two-addressy nature of the 'and' would
require a copy to be inserted (in X86InstrInfo::convertToThreeAddress).

//===---------------------------------------------------------------------===//
This generates ugly code, probably due to costs being off or something:

void %test(float* %P, <4 x float>* %P2 ) {
        %xFloat0.688 = load float* %P
        %loadVector37.712 = load <4 x float>* %P2
        %inFloat3.713 = insertelement <4 x float> %loadVector37.712, float 0.000000e+00, uint 3
        store <4 x float> %inFloat3.713, <4 x float>* %P2
        ret void
}

Compiles to:

        ...
        movd %xmm0, %eax                ;; EAX = 0!
        ...
        pinsrw $6, %eax, %xmm0
        shrl $16, %eax                  ;; EAX = 0 again!
        pinsrw $7, %eax, %xmm0

It would be better to generate:

        ...
        pinsrw $6, %eax, %xmm0
        pinsrw $7, %eax, %xmm0

or use pxor (to make a zero vector) and a shuffle (to insert it).

//===---------------------------------------------------------------------===//
We generate ugly code for:

char foo(int x) { return x; }

...

SIGN_EXTEND_INREG can be implemented as (sext (trunc)) to take advantage of
the fact that the low byte is directly addressable as a sub-register, so the
whole operation becomes a single movsbl.

//===---------------------------------------------------------------------===//
typedef struct pair { float A, B; } pair;
void pairtest(pair P, float *FP) {
  *FP = P.A + P.B;
}

We currently generate significantly worse code for this with llvmgcc4 than
we should. The issue is that llvmgcc4 is forcing the struct to memory, then
passing it as integer chunks. It does this so that structs like
{short,short} are passed in a single 32-bit integer stack slot. We should
handle the safe cases above much more gracefully, while still handling the
hard cases.

//===---------------------------------------------------------------------===//
Some ideas for instruction selection code simplification:

1. A pre-pass to determine which chain-producing nodes can or cannot be
   folded. The generated isel code would then use this information.
2. The same pre-pass could force the ordering of TokenFactor operands to
   allow load / store folding.
3. During isel, instead of recursively walking up the chain operands, mark
   each chain operand as available and put it on a worklist. Select other
   nodes in the normal manner; the chain operands are selected after all
   other nodes, and uses of chain nodes are modified after instruction
   selection is completed.

//===---------------------------------------------------------------------===//
Another instruction selector deficiency:

        ...
        %tmp = load int (int)** %foo
        %tmp = tail call int %tmp( int 3 )
        ...

which compiles to:

        ...
        movl L_foo$non_lazy_ptr, %eax
        ...

The current isel scheme will not allow the load to be folded into the call
since the load's chain result is read by the callseq_start.

//===---------------------------------------------------------------------===//
Don't forget to find a way to squash noop truncates in the JIT environment.

//===---------------------------------------------------------------------===//
Implement anyext in the same manner as truncate, which would allow them to
be eliminated.

//===---------------------------------------------------------------------===//
How about implementing truncate / anyext as a property of a machine
instruction operand? i.e. print it as a 32-bit super-class register or a
16-bit sub-class register. Do this for the cases where a truncate / anyext
is guaranteed to be eliminated. For IA32 that means truncate from 32 bits
to 16 and anyext from 16 bits to 32.

//===---------------------------------------------------------------------===//
For code like:

int test(int a) { return a * 3; }

we currently emit:

        imull $3, 4(%esp), %eax

Perhaps this is what we really should generate? Is imull three or four
cycles? Note: ICC generates this:

        movl 4(%esp), %eax
        leal (%eax,%eax,2), %eax

The current instruction priority is based on pattern complexity. The former
is more "complex" because it folds a load, so the latter will not be
emitted. Perhaps we should use AddedComplexity to give LEA32r a higher
priority? We should always try to match LEA first since the LEA matching
code does some estimation to determine whether the match is profitable.

However, if we care more about code size, then imull is better. It's two
bytes shorter than movl + leal.
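A quick size check against the standard IA-32 encodings backs this up:
imull $3, 4(%esp), %eax is 5 bytes (6B 44 24 04 03), while
movl 4(%esp), %eax (8B 44 24 04) plus leal (%eax,%eax,2), %eax (8D 04 40)
come to 7 bytes.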
//===---------------------------------------------------------------------===//
Implement CTTZ, CTLZ with bsf and bsr.

//===---------------------------------------------------------------------===//
It appears gcc places string data with linkonce linkage in
.section __TEXT,__const_coal,coalesced instead of
.section __DATA,__const_coal,coalesced.
Take a look at darwin.h; there are other Darwin assembler directives that we
do not make use of.

//===---------------------------------------------------------------------===//
We should handle __attribute__ ((__visibility__ ("hidden"))).

//===---------------------------------------------------------------------===//
int %foo(int* %a, int %t) {
entry:
        br label %cond_true

cond_true:              ; preds = %cond_true, %entry
        %x.0.0 = phi int [ 0, %entry ], [ %tmp9, %cond_true ]          ; <int> [#uses=3]
        %t_addr.0.0 = phi int [ %t, %entry ], [ %tmp7, %cond_true ]    ; <int> [#uses=1]
        %tmp2 = getelementptr int* %a, int %x.0.0      ; <int*> [#uses=1]
        %tmp3 = load int* %tmp2                        ; <int> [#uses=1]
        %tmp5 = add int %t_addr.0.0, %x.0.0            ; <int> [#uses=1]
        %tmp7 = add int %tmp5, %tmp3                   ; <int> [#uses=2]
        %tmp9 = add int %x.0.0, 1                      ; <int> [#uses=2]
        %tmp = setgt int %tmp9, 39                     ; <bool> [#uses=1]
        br bool %tmp, label %bb12, label %cond_true

bb12:           ; preds = %cond_true
        ret int %tmp7
}

This testcase is pessimized by -loop-reduce and -indvars.

//===---------------------------------------------------------------------===//
Use cpuid to auto-detect CPU features such as SSE, SSE2, and SSE3.
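A hedged sketch of the detection sequence (GCC-style inline asm; feature
bits per the Intel manuals: CPUID.1 EDX bit 25 = SSE, EDX bit 26 = SSE2,
ECX bit 0 = SSE3):

#include <stdio.h>

int main(void) {
  unsigned eax, ebx, ecx, edx;
  /* Execute CPUID leaf 1; feature flags come back in ECX/EDX. */
  __asm__ volatile("cpuid"
                   : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                   : "a"(1));
  printf("SSE:  %u\n", (edx >> 25) & 1);
  printf("SSE2: %u\n", (edx >> 26) & 1);
  printf("SSE3: %u\n", ecx & 1);
  return 0;
}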
//===---------------------------------------------------------------------===//
u32 to float conversion improvement:

float uint32_2_float( unsigned u ) {
  float fl = (int) (u & 0xffff);
  float fh = (int) (u >> 16);
  return fh * 65536.0f + fl;
}

This currently compiles to:

00000000        subl $0x04,%esp
00000003        movl 0x08(%esp,1),%eax
00000007        movl %eax,%ecx
00000009        shrl $0x10,%ecx
0000000c        cvtsi2ss %ecx,%xmm0
00000010        andl $0x0000ffff,%eax
00000015        cvtsi2ss %eax,%xmm1
00000019        mulss 0x00000078,%xmm0
00000021        addss %xmm1,%xmm0
00000025        movss %xmm0,(%esp,1)
0000002a        flds (%esp,1)
0000002d        addl $0x04,%esp
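A hedged sketch of one possible improvement (a hypothetical alternative, not
necessarily the one intended here): every u32 fits exactly in a signed
64-bit integer, so a single signed conversion suffices:

float uint32_2_float_alt(unsigned u) {
  return (float)(long long)u;  /* one int64 -> float conversion */
}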
//===---------------------------------------------------------------------===//
When using the fastcc ABI, align stack slots for arguments of type double
on an 8-byte boundary to improve performance.

//===---------------------------------------------------------------------===//
We could do better on code like:

if ((variable == 4) || (variable == 6)) { stuff }
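A hedged sketch of the usual trick for this shape (assuming that is the
intent of the entry; hypothetical function): 4 and 6 differ only in bit 1,
so the disjunction collapses to a single masked compare:

int f(int variable) {
  return (variable & ~2) == 4;  /* true exactly for 4 and 6 */
}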