So icc is smart enough to know that B is in memory so it doesn't load it and
store it back to stack.
+This should be fixed by eliminating the llvm.x86.sse2.loadl.pd intrinsic,
+lowering it to a load+insertelement instead. We already match the load+shuffle
+as movlpd, so this should be easy. We already get optimal code for:
+
+define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) {
+entry:
+ %tmp2 = load <2 x double>* %A, align 16
+ %tmp8 = insertelement <2 x double> %tmp2, double %B, i32 0
+ store <2 x double> %tmp8, <2 x double>* %r, align 16
+ ret void
+}
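+
+For reference, a C-level reproducer of the intrinsic path (illustrative only:
+the test3 name is made up here, and whether the frontend emits the intrinsic
+depends on how it defines _mm_loadl_pd):
+
+#include <emmintrin.h>
+__m128d test3(__m128d A, double *B) {
+  return _mm_loadl_pd(A, B);
+}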
+
//===---------------------------------------------------------------------===//
__m128d test1( __m128d A, __m128d B) {
This code generates ugly code, probably due to costs being off or something:
-void %test(float* %P, <4 x float>* %P2 ) {
+define void @test(float* %P, <4 x float>* %P2 ) {
%xFloat0.688 = load float* %P
- %loadVector37.712 = load <4 x float>* %P2
- %inFloat3.713 = insertelement <4 x float> %loadVector37.712, float 0.000000e+00, uint 3
+ %tmp = load <4 x float>* %P2
+ %inFloat3.713 = insertelement <4 x float> %tmp, float 0.0, i32 3
store <4 x float> %inFloat3.713, <4 x float>* %P2
ret void
}
Generates:
_test:
- pxor %xmm0, %xmm0
- movd %xmm0, %eax ;; EAX = 0!
- movl 8(%esp), %ecx
- movaps (%ecx), %xmm0
- pinsrw $6, %eax, %xmm0
- shrl $16, %eax ;; EAX = 0 again!
- pinsrw $7, %eax, %xmm0
- movaps %xmm0, (%ecx)
- ret
+ movl 8(%esp), %eax
+ movaps (%eax), %xmm0
+ pxor %xmm1, %xmm1
+ movaps %xmm0, %xmm2
+ shufps $50, %xmm1, %xmm2
+ shufps $132, %xmm2, %xmm0
+ movaps %xmm0, (%eax)
+ ret
-It would be better to generate:
+Would it be better to generate:
_test:
movl 8(%esp), %ecx
movaps %xmm0, (%ecx)
ret
-or use pxor (to make a zero vector) and shuffle (to insert it).
+?
//===---------------------------------------------------------------------===//
//===---------------------------------------------------------------------===//
-This code:
-
-#include <emmintrin.h>
-__m128i test(long long i) { return _mm_cvtsi64x_si128(i); }
-
-Should turn into a single 'movq %rdi, %xmm0' instruction. Instead, we
-get this (on x86-64):
-
-_test:
- movd %rdi, %xmm1
- xorps %xmm0, %xmm0
- movsd %xmm1, %xmm0
- ret
-
-The LLVM IR is:
-
-target triple = "x86_64-apple-darwin8"
-define <2 x i64> @test(i64 %i) {
-entry:
- %tmp10 = insertelement <2 x i64> undef, i64 %i, i32 0
- %tmp11 = insertelement <2 x i64> %tmp10, i64 0, i32 1
- ret <2 x i64> %tmp11
-}
-
-//===---------------------------------------------------------------------===//
-
These functions should produce the same code:
#include <emmintrin.h>
//===---------------------------------------------------------------------===//
-In this loop:
-
-bb49: ; preds = %bb49, %bb49.preheader
- %indvar = phi i32 [ 0, %bb49.preheader ], [ %indvar.next, %bb49 ] ; <i32> [#uses=2]
- %dp.089.0.rec = shl i32 %indvar, 3 ; <i32> [#uses=2]
- %dp.089.0 = getelementptr i32* %tmp89, i32 %dp.089.0.rec ; <i32*> [#uses=1]
- %tmp5051 = bitcast i32* %dp.089.0 to <2 x i64>* ; <<2 x i64>*> [#uses=1]
- store <2 x i64> zeroinitializer, <2 x i64>* %tmp5051, align 16
- %dp.089.0.sum105 = or i32 %dp.089.0.rec, 4 ; <i32> [#uses=1]
- %tmp56 = getelementptr i32* %tmp89, i32 %dp.089.0.sum105 ; <i32*> [#uses=1]
- %tmp5657 = bitcast i32* %tmp56 to <2 x i64>* ; <<2 x i64>*> [#uses=1]
- store <2 x i64> zeroinitializer, <2 x i64>* %tmp5657, align 16
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %tmp98 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb72, label %bb49
-
-we get:
-
-LBB5_6: # bb49.preheader
- shlw $2, %si
- decw %si
- movzwl %si, %eax
- incl %eax
- xorl %ecx, %ecx
-LBB5_7: # bb49
- xorps %xmm0, %xmm0 # (1)
- movaps %xmm0, (%edx)
- movaps %xmm0, 16(%edx)
- addl $32, %edx
- incl %ecx
- cmpl %eax, %ecx
- jne LBB4_7 # bb47
-
-The instruction at (1) can be moved out of the main body of the loop.
-
-//===---------------------------------------------------------------------===//
-
These functions:
#include <xmmintrin.h>
save, as a really-signed value would be undefined for pslld.
+//===---------------------------------------------------------------------===//
+
+#include <math.h>
+int t1(double d) { return signbit(d); }
+
+This currently compiles to:
+ subl $12, %esp
+ movsd 16(%esp), %xmm0
+ movsd %xmm0, (%esp)
+ movl 4(%esp), %eax
+ shrl $31, %eax
+ addl $12, %esp
+ ret
+
+We should use movmskp{s|d} instead.
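+
+Roughly, the code we would like (a sketch, not verified compiler output):
+
+ movsd 4(%esp), %xmm0
+ movmskpd %xmm0, %eax
+ andl $1, %eax
+ ret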
+
+//===---------------------------------------------------------------------===//
+
+CodeGen/X86/vec_align.ll tests whether we can turn 4 scalar loads into a single
+(aligned) vector load. This functionality has several problems:
+
+1. The code to infer alignment from loads of globals is in the X86 backend,
+ not the dag combiner. This is because dagcombine2 needs to be able to see
+ through the X86ISD::Wrapper node, which DAGCombine can't really do.
+2. The code for turning 4 x load into a single vector load is target
+ independent and should be moved to the dag combiner.
+3. The code for turning 4 x load into a vector load can only handle a direct
+ load from a global or a direct load from the stack. It should be generalized
+ to handle any load from P, P+4, P+8, P+12, where P can be anything (see the
+ sketch after this list).
+4. The alignment inference code cannot handle loads from globals in non-static
+ mode because it doesn't look through the extra dyld stub load. If you try
+ vec_align.ll without -relocation-model=static, you'll see what I mean.
+
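+For item 3, the general pattern we would like to catch is something like this
+(hypothetical IR; %P can be any pointer, not just a global or a stack slot):
+
+ %p1 = getelementptr float* %P, i32 1
+ %p2 = getelementptr float* %P, i32 2
+ %p3 = getelementptr float* %P, i32 3
+ %a = load float* %P, align 16
+ %b = load float* %p1
+ %c = load float* %p2
+ %d = load float* %p3
+ %v0 = insertelement <4 x float> undef, float %a, i32 0
+ %v1 = insertelement <4 x float> %v0, float %b, i32 1
+ %v2 = insertelement <4 x float> %v1, float %c, i32 2
+ %v3 = insertelement <4 x float> %v2, float %d, i32 3
+
+which should become a single aligned <4 x float> load (movaps) through a
+bitcast of %P.
+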
+//===---------------------------------------------------------------------===//
+
+We should lower store(fneg(load p), q) into an integer load+xor+store, which
+eliminates a constant pool load. For example, consider:
+
+define i64 @ccosf(float %z.0, float %z.1) nounwind readonly {
+entry:
+ %tmp6 = sub float -0.000000e+00, %z.1 ; <float> [#uses=1]
+ %tmp20 = tail call i64 @ccoshf( float %tmp6, float %z.0 ) nounwind readonly ; <i64> [#uses=1]
+ ret i64 %tmp20
+}
+
+This currently compiles to:
+
+LCPI1_0: # <4 x float>
+ .long 2147483648 # float -0
+ .long 2147483648 # float -0
+ .long 2147483648 # float -0
+ .long 2147483648 # float -0
+_ccosf:
+ subl $12, %esp
+ movss 16(%esp), %xmm0
+ movss %xmm0, 4(%esp)
+ movss 20(%esp), %xmm0
+ xorps LCPI1_0, %xmm0
+ movss %xmm0, (%esp)
+ call L_ccoshf$stub
+ addl $12, %esp
+ ret
+
+Note the load into xmm0, then xor (to negate), then store. In PIC mode,
+this code computes the pic base and does two loads to do the constant pool
+load, so the improvement is much bigger.
+
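+Roughly, the code we would like instead (a sketch, not verified output; it
+just replaces the constant-pool xorps with an integer load+xor+store of the
+sign bit):
+
+_ccosf:
+ subl $12, %esp
+ movss 16(%esp), %xmm0
+ movss %xmm0, 4(%esp)
+ movl 20(%esp), %eax
+ xorl $0x80000000, %eax
+ movl %eax, (%esp)
+ call L_ccoshf$stub
+ addl $12, %esp
+ ret
+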
+The tricky part about this xform is that the argument load/store isn't exposed
+until post-legalize, and at that point, the fneg has been custom expanded into
+an X86 fxor. This means that we need to handle this case in the x86 backend
+instead of in target independent code.
+
//===---------------------------------------------------------------------===//