// Random ideas for the X86 backend: SSE-specific stuff.
//===---------------------------------------------------------------------===//
+- Consider eliminating the unaligned SSE load intrinsics, replacing them with
+ unaligned LLVM load instructions.
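+
+  For instance (a sketch; 'load_unaligned' is just an illustrative name), the
+  intrinsic call a frontend currently emits for _mm_loadu_ps could instead be
+  expressed as a plain unaligned LLVM vector load (align 1):
+
+  #include <xmmintrin.h>
+  __m128 load_unaligned(const float *p) {
+    /* today this goes through an SSE loadu intrinsic; an unaligned      */
+    /* LLVM vector load with align 1 would express the same thing        */
+    return _mm_loadu_ps(p);
+  }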
+
+//===---------------------------------------------------------------------===//
+
+Expand libm rounding functions inline: Significant speedups possible.
+http://gcc.gnu.org/ml/gcc-patches/2006-10/msg00909.html
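+
+A rough sketch of the kind of inline expansion meant here (illustrative only;
+it assumes |d| fits in a 64-bit integer and ignores -0.0/NaN corner cases):
+the truncation maps to a single cvttsd2si, so no libm call is needed.
+
+double floor_inline(double d) {
+  double t = (double)(long long)d;   /* truncate toward zero (cvttsd2si) */
+  return t > d ? t - 1.0 : t;        /* step down for negative non-integers */
+}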
+
//===---------------------------------------------------------------------===//
When compiled with unsafemath enabled, "main" should enable SSE DAZ mode and
time, not at spiller time). *Note* however that this can only be done
if Y is dead. Here's a testcase:
-%.str_3 = external global [15 x sbyte] ; <[15 x sbyte]*> [#uses=0]
-implementation ; Functions:
-declare void %printf(int, ...)
-void %main() {
+@.str_3 = external global [15 x i8]
+declare void @printf(i32, ...)
+define void @main() {
build_tree.exit:
- br label %no_exit.i7
-no_exit.i7: ; preds = %no_exit.i7, %build_tree.exit
- %tmp.0.1.0.i9 = phi double [ 0.000000e+00, %build_tree.exit ], [ %tmp.34.i18, %no_exit.i7 ] ; <double> [#uses=1]
- %tmp.0.0.0.i10 = phi double [ 0.000000e+00, %build_tree.exit ], [ %tmp.28.i16, %no_exit.i7 ] ; <double> [#uses=1]
- %tmp.28.i16 = add double %tmp.0.0.0.i10, 0.000000e+00
- %tmp.34.i18 = add double %tmp.0.1.0.i9, 0.000000e+00
- br bool false, label %Compute_Tree.exit23, label %no_exit.i7
-Compute_Tree.exit23: ; preds = %no_exit.i7
- tail call void (int, ...)* %printf( int 0 )
- store double %tmp.34.i18, double* null
- ret void
+ br label %no_exit.i7
+
+no_exit.i7: ; preds = %no_exit.i7, %build_tree.exit
+ %tmp.0.1.0.i9 = phi double [ 0.000000e+00, %build_tree.exit ],
+ [ %tmp.34.i18, %no_exit.i7 ]
+ %tmp.0.0.0.i10 = phi double [ 0.000000e+00, %build_tree.exit ],
+ [ %tmp.28.i16, %no_exit.i7 ]
+ %tmp.28.i16 = add double %tmp.0.0.0.i10, 0.000000e+00
+ %tmp.34.i18 = add double %tmp.0.1.0.i9, 0.000000e+00
+ br i1 false, label %Compute_Tree.exit23, label %no_exit.i7
+
+Compute_Tree.exit23: ; preds = %no_exit.i7
+ tail call void (i32, ...)* @printf( i32 0 )
+ store double %tmp.34.i18, double* null
+ ret void
}
We currently emit:
//===---------------------------------------------------------------------===//
-Currently the x86 codegen isn't very good at mixing SSE and FPStack
-code:
-
-unsigned int foo(double x) { return x; }
-
-foo:
- subl $20, %esp
- movsd 24(%esp), %xmm0
- movsd %xmm0, 8(%esp)
- fldl 8(%esp)
- fisttpll (%esp)
- movl (%esp), %eax
- addl $20, %esp
- ret
-
-This will be solved when we go to a dynamic programming based isel.
-
-//===---------------------------------------------------------------------===//
-
-Should generate min/max for stuff like:
-
-void minf(float a, float b, float *X) {
- *X = a <= b ? a : b;
-}
-
-Make use of floating point min / max instructions. Perhaps introduce ISD::FMIN
-and ISD::FMAX node types?
-
-//===---------------------------------------------------------------------===//
-
-The first BB of this code:
-
-declare bool %foo()
-int %bar() {
- %V = call bool %foo()
- br bool %V, label %T, label %F
-T:
- ret int 1
-F:
- call bool %foo()
- ret int 12
-}
-
-compiles to:
-
-_bar:
- subl $12, %esp
- call L_foo$stub
- xorb $1, %al
- testb %al, %al
- jne LBB_bar_2 # F
-
-It would be better to emit "cmp %al, 1" than a xor and test.
-
-//===---------------------------------------------------------------------===//
-
Lower memcpy / memset to a series of SSE 128 bit move instructions when it's
feasible.
//===---------------------------------------------------------------------===//
-Teach the coalescer to commute 2-addr instructions, allowing us to eliminate
-the reg-reg copy in this example:
-
-float foo(int *x, float *y, unsigned c) {
- float res = 0.0;
- unsigned i;
- for (i = 0; i < c; i++) {
- float xx = (float)x[i];
- xx = xx * y[i];
- xx += res;
- res = xx;
- }
- return res;
-}
-
-LBB_foo_3: # no_exit
- cvtsi2ss %XMM0, DWORD PTR [%EDX + 4*%ESI]
- mulss %XMM0, DWORD PTR [%EAX + 4*%ESI]
- addss %XMM0, %XMM1
- inc %ESI
- cmp %ESI, %ECX
-**** movaps %XMM1, %XMM0
- jb LBB_foo_3 # no_exit
-
-//===---------------------------------------------------------------------===//
-
Codegen:
if (copysign(1.0, x) == copysign(1.0, y))
into:
//===---------------------------------------------------------------------===//
-Better codegen for:
-
-void f(float a, float b, vector float * out) { *out = (vector float){ a, 0.0, 0.0, b}; }
-void f(float a, float b, vector float * out) { *out = (vector float){ a, b, 0.0, 0}; }
-
-For the later we generate:
-
-_f:
- pxor %xmm0, %xmm0
- movss 8(%esp), %xmm1
- movaps %xmm0, %xmm2
- unpcklps %xmm1, %xmm2
- movss 4(%esp), %xmm1
- unpcklps %xmm0, %xmm1
- unpcklps %xmm2, %xmm1
- movl 12(%esp), %eax
- movaps %xmm1, (%eax)
- ret
-
-This seems like it should use shufps, one for each of a & b.
-
-//===---------------------------------------------------------------------===//
-
How to decide when to use the "floating point version" of logical ops? Here are
some code fragments:
//===---------------------------------------------------------------------===//
-Use movddup to splat a v2f64 directly from a memory source. e.g.
-
-#include <emmintrin.h>
-
-void test(__m128d *r, double A) {
- *r = _mm_set1_pd(A);
-}
-
-llc:
-
-_test:
- movsd 8(%esp), %xmm0
- unpcklpd %xmm0, %xmm0
- movl 4(%esp), %eax
- movapd %xmm0, (%eax)
- ret
-
-icc:
-
-_test:
- movl 4(%esp), %eax
- movddup 8(%esp), %xmm0
- movapd %xmm0, (%eax)
- ret
-
-//===---------------------------------------------------------------------===//
-
X86RegisterInfo::copyRegToReg() returns X86::MOVAPSrr for VR128. Is it possible
to choose between movaps, movapd, and movdqa based on types of source and
destination?
//===---------------------------------------------------------------------===//
-We are emitting bad code for this:
-
-float %test(float* %V, int %I, int %D, float %V) {
-entry:
- %tmp = seteq int %D, 0
- br bool %tmp, label %cond_true, label %cond_false23
-
-cond_true:
- %tmp3 = getelementptr float* %V, int %I
- %tmp = load float* %tmp3
- %tmp5 = setgt float %tmp, %V
- %tmp6 = tail call bool %llvm.isunordered.f32( float %tmp, float %V )
- %tmp7 = or bool %tmp5, %tmp6
- br bool %tmp7, label %UnifiedReturnBlock, label %cond_next
-
-cond_next:
- %tmp10 = add int %I, 1
- %tmp12 = getelementptr float* %V, int %tmp10
- %tmp13 = load float* %tmp12
- %tmp15 = setle float %tmp13, %V
- %tmp16 = tail call bool %llvm.isunordered.f32( float %tmp13, float %V )
- %tmp17 = or bool %tmp15, %tmp16
- %retval = select bool %tmp17, float 0.000000e+00, float 1.000000e+00
- ret float %retval
-
-cond_false23:
- %tmp28 = tail call float %foo( float* %V, int %I, int %D, float %V )
- ret float %tmp28
-
-UnifiedReturnBlock: ; preds = %cond_true
- ret float 0.000000e+00
-}
-
-declare bool %llvm.isunordered.f32(float, float)
-
-declare float %foo(float*, int, int, float)
-
-
-It exposes a known load folding problem:
-
- movss (%edx,%ecx,4), %xmm1
- ucomiss %xmm1, %xmm0
-
-As well as this:
-
-LBB_test_2: # cond_next
- movss LCPI1_0, %xmm2
- pxor %xmm3, %xmm3
- ucomiss %xmm0, %xmm1
- jbe LBB_test_6 # cond_next
-LBB_test_5: # cond_next
- movaps %xmm2, %xmm3
-LBB_test_6: # cond_next
- movss %xmm3, 40(%esp)
- flds 40(%esp)
- addl $44, %esp
- ret
-
-Clearly it's unnecessary to clear %xmm3. It's also not clear why we are emitting
-three moves (movss, movaps, movss).
-
-//===---------------------------------------------------------------------===//
-
External test Nurbs exposed some problems. Look for
__ZN15Nurbs_SSE_Cubic17TessellateSurfaceE, bb cond_next140. This is what icc
emits:
So icc is smart enough to know that B is in memory so it doesn't load it and
store it back to stack.
+This should be fixed by eliminating the llvm.x86.sse2.loadl.pd intrinsic and
+lowering it to a load+insertelement instead. We already match the load+shuffle
+as movlpd, so this should be easy. We get optimal code for:
+
+define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) {
+entry:
+ %tmp2 = load <2 x double>* %A, align 16
+ %tmp8 = insertelement <2 x double> %tmp2, double %B, i32 0
+ store <2 x double> %tmp8, <2 x double>* %r, align 16
+ ret void
+}
+
//===---------------------------------------------------------------------===//
__m128d test1( __m128d A, __m128d B) {
This code generates ugly code, probably due to costs being off or something:
-void %test(float* %P, <4 x float>* %P2 ) {
+define void @test(float* %P, <4 x float>* %P2 ) {
%xFloat0.688 = load float* %P
- %loadVector37.712 = load <4 x float>* %P2
- %inFloat3.713 = insertelement <4 x float> %loadVector37.712, float 0.000000e+00, uint 3
+ %tmp = load <4 x float>* %P2
+ %inFloat3.713 = insertelement <4 x float> %tmp, float 0.0, i32 3
store <4 x float> %inFloat3.713, <4 x float>* %P2
ret void
}
Generates:
_test:
- pxor %xmm0, %xmm0
- movd %xmm0, %eax ;; EAX = 0!
- movl 8(%esp), %ecx
- movaps (%ecx), %xmm0
- pinsrw $6, %eax, %xmm0
- shrl $16, %eax ;; EAX = 0 again!
- pinsrw $7, %eax, %xmm0
- movaps %xmm0, (%ecx)
- ret
+ movl 8(%esp), %eax
+ movaps (%eax), %xmm0
+ pxor %xmm1, %xmm1
+ movaps %xmm0, %xmm2
+ shufps $50, %xmm1, %xmm2
+ shufps $132, %xmm2, %xmm0
+ movaps %xmm0, (%eax)
+ ret
-It would be better to generate:
+Would it be better to generate:
_test:
movl 8(%esp), %ecx
movaps %xmm0, (%ecx)
ret
-or use pxor (to make a zero vector) and shuffle (to insert it).
+?
//===---------------------------------------------------------------------===//
//===---------------------------------------------------------------------===//
-Implement some missing insert/extract element operations without going through
-the stack. Testcase here:
-CodeGen/X86/vec_ins_extract.ll
-corresponds to this C code:
+Apply the same transformation that merges four float loads into a single
+128-bit load to loads from the constant pool.
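+
+A hypothetical instance (sketch only): building a vector out of four scalar FP
+constants should become one 16-byte constant-pool entry loaded with a single
+movaps, rather than four scalar constant-pool loads:
+
+#include <xmmintrin.h>
+__m128 four_consts(void) {
+  return _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
+}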
+
+//===---------------------------------------------------------------------===//
-typedef float vectorfloat __attribute__((vector_size(16)));
-void test(vectorfloat *F, float f) {
- vectorfloat G = *F + *F;
- *((float*)&G) = f;
- *F = G + G;
+Floating point max / min are commutable when -enable-unsafe-fp-math is
+specified. We should turn int_x86_sse_max_ss and X86ISD::FMIN etc. into other
+nodes which are selected to max / min instructions that are marked commutable.
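+
+For reference, a small intrinsics sketch of why the swap is only legal in
+unsafe-fp-math mode: minss/maxss take their result from the second (source)
+operand when either input is a NaN, and for min(-0.0, +0.0), so these two
+functions are not equivalent in general. With unsafe-fp-math those cases are
+don't-cares and the operands can be commuted, e.g. to fold a load into either
+side:
+
+#include <xmmintrin.h>
+__m128 min_ab(__m128 a, __m128 b) { return _mm_min_ss(a, b); }
+__m128 min_ba(__m128 a, __m128 b) { return _mm_min_ss(b, a); }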
+
+//===---------------------------------------------------------------------===//
+
+We should compile this:
+#include <xmmintrin.h>
+typedef union {
+ int i[4];
+ float f[4];
+ __m128 v;
+} vector4_t;
+void swizzle (const void *a, vector4_t * b, vector4_t * c) {
+ b->v = _mm_loadl_pi (b->v, (__m64 *) a);
+ c->v = _mm_loadl_pi (c->v, ((__m64 *) a) + 1);
}
-void test2(vectorfloat *F, float f) {
- vectorfloat G = *F + *F;
- ((float*)&G)[2] = f;
- *F = G + G;
+
+to:
+
+_swizzle:
+ movl 4(%esp), %eax
+ movl 8(%esp), %edx
+ movl 12(%esp), %ecx
+ movlps (%eax), %xmm0
+ movlps %xmm0, (%edx)
+ movlps 8(%eax), %xmm0
+ movlps %xmm0, (%ecx)
+ ret
+
+not:
+
+swizzle:
+ movl 8(%esp), %eax
+ movaps (%eax), %xmm0
+ movl 4(%esp), %ecx
+ movlps (%ecx), %xmm0
+ movaps %xmm0, (%eax)
+ movl 12(%esp), %eax
+ movaps (%eax), %xmm0
+ movlps 8(%ecx), %xmm0
+ movaps %xmm0, (%eax)
+ ret
+
+//===---------------------------------------------------------------------===//
+
+These functions should produce the same code:
+
+#include <emmintrin.h>
+
+typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+
+int foo(__m128i* val) {
+ return __builtin_ia32_vec_ext_v4si(*val, 1);
}
-void test3(vectorfloat *F, float *f) {
- vectorfloat G = *F + *F;
- *f = ((float*)&G)[2];
+int bar(__m128i* val) {
+ union vs {
+ __m128i *_v;
+ int* _s;
+ } v = {val};
+ return v._s[1];
}
-void test4(vectorfloat *F, float *f) {
- vectorfloat G = *F + *F;
- *f = *((float*)&G);
+
+We currently produce (with -m64):
+
+_foo:
+ pshufd $1, (%rdi), %xmm0
+ movd %xmm0, %eax
+ ret
+_bar:
+ movl 4(%rdi), %eax
+ ret
+
+//===---------------------------------------------------------------------===//
+
+We should materialize vector constants like "all ones" and "signbit" with
+code like:
+
+ cmpeqps xmm1, xmm1 ; xmm1 = all-ones
+
+and:
+ cmpeqps xmm1, xmm1 ; xmm1 = all-ones
+ pslld xmm1, 31 ; xmm1 = 0x80000000 in each lane (the signbit mask)
+
+instead of using a load from the constant pool. The latter is important for
+ABS/NEG/copysign etc.
+
+//===---------------------------------------------------------------------===//
+
+These functions:
+
+#include <xmmintrin.h>
+__m128i a;
+void x(unsigned short n) {
+ a = _mm_slli_epi32 (a, n);
}
+void y(unsigned n) {
+ a = _mm_slli_epi32 (a, n);
+}
+
+compile to (-O3 -static -fomit-frame-pointer):
+_x:
+ movzwl 4(%esp), %eax
+ movd %eax, %xmm0
+ movaps _a, %xmm1
+ pslld %xmm0, %xmm1
+ movaps %xmm1, _a
+ ret
+_y:
+ movd 4(%esp), %xmm0
+ movaps _a, %xmm1
+ pslld %xmm0, %xmm1
+ movaps %xmm1, _a
+ ret
+
+"y" looks good, but "x" does silly movzwl stuff around into a GPR. It seems
+like movd would be sufficient in both cases as the value is already zero
+extended in the 32-bit stack slot IIRC. For signed short, it should also be
+save, as a really-signed value would be undefined for pslld.
+
//===---------------------------------------------------------------------===//
+
+#include <math.h>
+int t1(double d) { return signbit(d); }
+
+This currently compiles to:
+ subl $12, %esp
+ movsd 16(%esp), %xmm0
+ movsd %xmm0, (%esp)
+ movl 4(%esp), %eax
+ shrl $31, %eax
+ addl $12, %esp
+ ret
+
+We should use movmskp{s|d} instead.
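+
+Expressed in intrinsics (a sketch of the desired result, not of how the
+lowering would be written), that is essentially:
+
+#include <emmintrin.h>
+int t1_movmsk(double d) {
+  /* movmskpd puts the sign bits in the low bits of a GPR; bit 0 is the */
+  /* sign of the low element                                            */
+  return _mm_movemask_pd(_mm_set_sd(d)) & 1;
+}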
+
+//===---------------------------------------------------------------------===//
+
+CodeGen/X86/vec_align.ll tests whether we can turn 4 scalar loads into a single
+(aligned) vector load. This functionality has several problems:
+
+1. The code to infer alignment from loads of globals is in the X86 backend,
+ not the dag combiner. This is because dagcombine2 needs to be able to see
+ through the X86ISD::Wrapper node, which DAGCombine can't really do.
+2. The code for turning 4 x load into a single vector load is target
+ independent and should be moved to the dag combiner.
+3. The code for turning 4 x load into a vector load can only handle a direct
+   load from a global or a direct load from the stack. It should be generalized
+   to handle any load from P, P+4, P+8, P+12, where P can be anything (see the
+   C sketch after this list).
+4. The alignment inference code cannot handle loads from globals in non-static
+ mode because it doesn't look through the extra dyld stub load. If you try
+ vec_align.ll without -relocation-model=static, you'll see what I mean.
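+
+For item 3, a hypothetical example of the general pattern ('load4' is just an
+illustrative name): four consecutive scalar loads from an arbitrary pointer P
+that should become a single 128-bit load when P is 16-byte aligned:
+
+#include <xmmintrin.h>
+__m128 load4(const float *P) {
+  /* loads from P, P+4, P+8, P+12 */
+  return _mm_set_ps(P[3], P[2], P[1], P[0]);
+}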
+
+//===---------------------------------------------------------------------===//
+
+We should lower store(fneg(load p), q) into an integer load+xor+store, which
+eliminates a constant pool load. For example, consider:
+
+define i64 @ccosf(float %z.0, float %z.1) nounwind readonly {
+entry:
+ %tmp6 = sub float -0.000000e+00, %z.1 ; <float> [#uses=1]
+ %tmp20 = tail call i64 @ccoshf( float %tmp6, float %z.0 ) nounwind readonly
+ ret i64 %tmp20
+}
+
+This currently compiles to:
+
+LCPI1_0: # <4 x float>
+ .long 2147483648 # float -0
+ .long 2147483648 # float -0
+ .long 2147483648 # float -0
+ .long 2147483648 # float -0
+_ccosf:
+ subl $12, %esp
+ movss 16(%esp), %xmm0
+ movss %xmm0, 4(%esp)
+ movss 20(%esp), %xmm0
+ xorps LCPI1_0, %xmm0
+ movss %xmm0, (%esp)
+ call L_ccoshf$stub
+ addl $12, %esp
+ ret
+
+Note the load into xmm0, then xor (to negate), then store. In PIC mode,
+this code computes the pic base and does two loads to do the constant pool
+load, so the improvement is much bigger.
+
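+A C-level picture of what the integer version of the fneg amounts to (sketch
+only; the actual transform would be done on the DAG):
+
+#include <stdint.h>
+#include <string.h>
+float fneg_via_int(float x) {
+  uint32_t bits;
+  memcpy(&bits, &x, sizeof bits);   /* reinterpret the float as an i32 */
+  bits ^= 0x80000000u;              /* flip the IEEE sign bit          */
+  memcpy(&x, &bits, sizeof bits);
+  return x;
+}
+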
+The tricky part about this xform is that the argument load/store isn't exposed
+until post-legalize, and at that point, the fneg has been custom expanded into
+an X86 fxor. This means that we need to handle this case in the x86 backend
+instead of in target independent code.
+
+//===---------------------------------------------------------------------===//
+
+Non-SSE4 insert into 16 x i8 is atrociously bad.
+
+//===---------------------------------------------------------------------===//
+
+<2 x i64> extract is substantially worse than <2 x f64>, even if the destination
+is memory.
+
+//===---------------------------------------------------------------------===//
+
+SSE4 extract-to-mem ops aren't being pattern matched because of the AssertZext
+sitting between the truncate and the extract.
+
+//===---------------------------------------------------------------------===//
+
+INSERTPS can match any insertelement of (extractelement ..., imm1) at index
+imm2 for 4 x float, and can insert any number of 0.0 elements at the same
+time. Currently we only use it for simple insertions.
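+
+For example (an intrinsics sketch; the SSE4.1 header provides these): a single
+insertps can move element 2 of b into element 1 of a and zero element 3 of the
+result at the same time:
+
+#include <smmintrin.h>
+__m128 ins(__m128 a, __m128 b) {
+  return _mm_insert_ps(a, b, _MM_MK_INSERTPS_NDX(2, 1, 0x8));
+}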
+
+See comments in LowerINSERT_VECTOR_ELT_SSE4.
+
+//===---------------------------------------------------------------------===//
+
+On a random note, SSE2 should declare insert/extract of 2 x f64 as legal, not
+Custom. All combinations of insert/extract reg-reg, reg-mem, and mem-reg are
+legal; it'll just take a few extra patterns written in the .td file.
+
+Note: this is not a code quality issue; the custom lowered code happens to be
+right, but we shouldn't have to custom lower anything. This is probably related
+to <2 x i64> ops being so bad.
+
+//===---------------------------------------------------------------------===//
+
+'select' on vectors and scalars could be a whole lot better. We currently
+lower them to conditional branches. On x86-64 for example, we compile this:
+
+double test(double a, double b, double c, double d) { return a<b ? c : d; }
+
+to:
+
+_test:
+ ucomisd %xmm0, %xmm1
+ ja LBB1_2 # entry
+LBB1_1: # entry
+ movapd %xmm3, %xmm2
+LBB1_2: # entry
+ movapd %xmm2, %xmm0
+ ret
+
+instead of:
+
+_test:
+ cmpltsd %xmm1, %xmm0
+ andpd %xmm0, %xmm2
+ andnpd %xmm3, %xmm0
+ orpd %xmm2, %xmm0
+ ret
+
+For unpredictable branches, the latter is much more efficient. This should
+just be a matter of having scalar SSE map to SELECT_CC and custom expanding
+or iseling it.
+
+//===---------------------------------------------------------------------===//
+
+Take the following code:
+
+#include <xmmintrin.h>
+__m128i doload64(short x) {return _mm_set_epi16(x,x,x,x,x,x,x,x);}
+
+LLVM currently generates the following on x86:
+doload64:
+ movzwl 4(%esp), %eax
+ movd %eax, %xmm0
+ punpcklwd %xmm0, %xmm0
+ pshufd $0, %xmm0, %xmm0
+ ret
+
+gcc's generated code:
+doload64:
+ movd 4(%esp), %xmm0
+ punpcklwd %xmm0, %xmm0
+ pshufd $0, %xmm0, %xmm0
+ ret
+
+LLVM should be able to generate the same thing as gcc. This looks like it is
+just a matter of matching (scalar_to_vector (load x)) to movd.
+
+//===---------------------------------------------------------------------===//
+