1 ; RUN: llc -march=x86-64 < %s | FileCheck %s
3 ; When doing vector gather-scatter index calculation with 32-bit indices,
4 ; bounce the vector off of the cache rather than shuffling each individual
5 ; element out of the index vector.
7 ; CHECK: pand (%rdx), %xmm0
8 ; CHECK: movaps %xmm0, -24(%rsp)
9 ; CHECK: movslq -24(%rsp), %rax
10 ; CHECK: movsd (%rdi,%rax,8), %xmm0
11 ; CHECK: movslq -20(%rsp), %rax
12 ; CHECK: movhpd (%rdi,%rax,8), %xmm0
13 ; CHECK: movslq -16(%rsp), %rax
14 ; CHECK: movsd (%rdi,%rax,8), %xmm1
15 ; CHECK: movslq -12(%rsp), %rax
16 ; CHECK: movhpd (%rdi,%rax,8), %xmm1
18 define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
19 %a = load <4 x i32>* %i
20 %b = load <4 x i32>* %h
21 %j = and <4 x i32> %a, %b
22 %d0 = extractelement <4 x i32> %j, i32 0
23 %d1 = extractelement <4 x i32> %j, i32 1
24 %d2 = extractelement <4 x i32> %j, i32 2
25 %d3 = extractelement <4 x i32> %j, i32 3
26 %q0 = getelementptr double* %p, i32 %d0
27 %q1 = getelementptr double* %p, i32 %d1
28 %q2 = getelementptr double* %p, i32 %d2
29 %q3 = getelementptr double* %p, i32 %d3
30 %r0 = load double* %q0
31 %r1 = load double* %q1
32 %r2 = load double* %q2
33 %r3 = load double* %q3
34 %v0 = insertelement <4 x double> undef, double %r0, i32 0
35 %v1 = insertelement <4 x double> %v0, double %r1, i32 1
36 %v2 = insertelement <4 x double> %v1, double %r2, i32 2
37 %v3 = insertelement <4 x double> %v2, double %r3, i32 3