; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX

; Verify that we select the correct version of the instruction that stores the low 64-bits
; of a 128-bit vector. We want to avoid int/fp domain crossing penalties, so ignore the
; bitcast ops and choose:
; movlps for floats, movlpd for doubles, movq for integers.
; Low 64 bits of an fp-domain add: expect the fp-domain store movlps.
; The AVX path still selects the int-domain vmovq (see FIXME below).
define void @store_floats(<4 x float> %x, i64* %p) {
; SSE-LABEL: store_floats:
; SSE: # BB#0:
; SSE-NEXT: addps %xmm0, %xmm0
; SSE-NEXT: movlps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: store_floats:
; AVX: # BB#0:
; AVX-NEXT: vaddps %xmm0, %xmm0, %xmm0
;
; !!! FIXME - the AVX version is not handled correctly.
; AVX-NEXT: vmovq %xmm0, (%rdi)
; AVX-NEXT: retq
  %a = fadd <4 x float> %x, %x
  %b = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
  %c = bitcast <2 x float> %b to i64
  store i64 %c, i64* %p
  ret void
}
; Low element of a double-domain add: expect the fp-domain store movlpd,
; and the AVX form vmovlpd (this case is handled correctly on both paths).
define void @store_double(<2 x double> %x, i64* %p) {
; SSE-LABEL: store_double:
; SSE: # BB#0:
; SSE-NEXT: addpd %xmm0, %xmm0
; SSE-NEXT: movlpd %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: store_double:
; AVX: # BB#0:
; AVX-NEXT: vaddpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovlpd %xmm0, (%rdi)
; AVX-NEXT: retq
  %a = fadd <2 x double> %x, %x
  %b = extractelement <2 x double> %a, i32 0
  %c = bitcast double %b to i64
  store i64 %c, i64* %p
  ret void
}
; Low 64 bits of an integer-domain add: expect the int-domain store movq / vmovq.
; NOTE(review): the lines below carry fused listing numbers (e.g. "55 ") from a
; garbled extraction, and interior lines appear to have been dropped (the fused
; numbering skips 57, 60-61, 63, 66); restore this test from upstream rather
; than hand-editing.
55 define void @store_int(<4 x i32> %x, <2 x float>* %p) {
56 ; SSE-LABEL: store_int:
58 ; SSE-NEXT: paddd %xmm0, %xmm0
59 ; SSE-NEXT: movq %xmm0, (%rdi)
62 ; AVX-LABEL: store_int:
64 ; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
65 ; AVX-NEXT: vmovq %xmm0, (%rdi)
67 %a = add <4 x i32> %x, %x
68 %b = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
69 %c = bitcast <2 x i32> %b to <2 x float>
70 store <2 x float> %c, <2 x float>* %p
; NOTE(review): 'ret void' and the closing '}' are not visible in this excerpt —
; the definition continues past the last line shown here.