1 ; RUN: llc -march=r600 -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
3 ; FUNC-LABEL: {{^}}test_copy_v4i8:
4 ; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
5 ; SI: BUFFER_STORE_DWORD [[REG]]
define void @test_copy_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
; Function was missing its terminator: every LLVM basic block must end in one.
ret void
}
13 ; FUNC-LABEL: {{^}}test_copy_v4i8_x2:
14 ; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
15 ; SI: BUFFER_STORE_DWORD [[REG]]
16 ; SI: BUFFER_STORE_DWORD [[REG]]
define void @test_copy_v4i8_x2(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
; Restore missing block terminator and function close.
ret void
}
25 ; FUNC-LABEL: {{^}}test_copy_v4i8_x3:
26 ; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
27 ; SI: BUFFER_STORE_DWORD [[REG]]
28 ; SI: BUFFER_STORE_DWORD [[REG]]
29 ; SI: BUFFER_STORE_DWORD [[REG]]
define void @test_copy_v4i8_x3(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
; Restore missing block terminator and function close.
ret void
}
39 ; FUNC-LABEL: {{^}}test_copy_v4i8_x4:
40 ; SI: BUFFER_LOAD_DWORD [[REG:v[0-9]+]]
41 ; SI: BUFFER_STORE_DWORD [[REG]]
42 ; SI: BUFFER_STORE_DWORD [[REG]]
43 ; SI: BUFFER_STORE_DWORD [[REG]]
44 ; SI: BUFFER_STORE_DWORD [[REG]]
define void @test_copy_v4i8_x4(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %out3, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out1, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out3, align 4
; Restore missing block terminator and function close.
ret void
}
55 ; FUNC-LABEL: {{^}}test_copy_v4i8_extra_use:
56 ; SI: BUFFER_LOAD_UBYTE
57 ; SI: BUFFER_LOAD_UBYTE
58 ; SI: BUFFER_LOAD_UBYTE
59 ; SI: BUFFER_LOAD_UBYTE
64 ; SI-DAG: BUFFER_STORE_BYTE
65 ; SI-DAG: BUFFER_STORE_BYTE
66 ; SI-DAG: BUFFER_STORE_BYTE
67 ; SI-DAG: BUFFER_STORE_BYTE
68 ; SI-DAG: BUFFER_STORE_BYTE
69 ; SI-DAG: BUFFER_STORE_BYTE
70 ; SI-DAG: BUFFER_STORE_BYTE
; SI-DAG: BUFFER_STORE_BYTE
; TODO: Enable the XSI checks below once scalarizing of v4i8 loads is fixed.
74 ; XSI: BUFFER_LOAD_DWORD
79 ; XSI: BUFFER_STORE_DWORD
80 ; XSI: BUFFER_STORE_DWORD
define void @test_copy_v4i8_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8> addrspace(1)* %in, align 4
%add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
; Restore missing block terminator and function close.
ret void
}
91 ; FUNC-LABEL: {{^}}test_copy_v4i8_x2_extra_use:
92 ; SI: BUFFER_LOAD_UBYTE
93 ; SI: BUFFER_LOAD_UBYTE
94 ; SI: BUFFER_LOAD_UBYTE
95 ; SI: BUFFER_LOAD_UBYTE
100 ; SI-DAG: BUFFER_STORE_BYTE
101 ; SI-DAG: BUFFER_STORE_BYTE
102 ; SI-DAG: BUFFER_STORE_BYTE
103 ; SI-DAG: BUFFER_STORE_BYTE
104 ; SI-DAG: BUFFER_STORE_BYTE
105 ; SI-DAG: BUFFER_STORE_BYTE
106 ; SI-DAG: BUFFER_STORE_BYTE
; SI-DAG: BUFFER_STORE_BYTE
108 ; SI-DAG: BUFFER_STORE_BYTE
109 ; SI-DAG: BUFFER_STORE_BYTE
110 ; SI-DAG: BUFFER_STORE_BYTE
; SI-DAG: BUFFER_STORE_BYTE
113 ; XSI: BUFFER_LOAD_DWORD
115 ; XSI: BUFFER_STORE_DWORD
117 ; XSI: BUFFER_STORE_DWORD
118 ; XSI-NEXT: BUFFER_STORE_DWORD
define void @test_copy_v4i8_x2_extra_use(<4 x i8> addrspace(1)* %out0, <4 x i8> addrspace(1)* %out1, <4 x i8> addrspace(1)* %out2, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8> addrspace(1)* %in, align 4
%add = add <4 x i8> %val, <i8 9, i8 9, i8 9, i8 9>
store <4 x i8> %val, <4 x i8> addrspace(1)* %out0, align 4
store <4 x i8> %add, <4 x i8> addrspace(1)* %out1, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out2, align 4
; Restore missing block terminator and function close.
ret void
}
130 ; FUNC-LABEL: {{^}}test_copy_v3i8:
define void @test_copy_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %in) nounwind {
%val = load <3 x i8> addrspace(1)* %in, align 4
store <3 x i8> %val, <3 x i8> addrspace(1)* %out, align 4
; Restore missing block terminator and function close.
ret void
}
140 ; FUNC-LABEL: {{^}}test_copy_v4i8_volatile_load:
141 ; SI: BUFFER_LOAD_UBYTE
142 ; SI: BUFFER_LOAD_UBYTE
143 ; SI: BUFFER_LOAD_UBYTE
144 ; SI: BUFFER_LOAD_UBYTE
define void @test_copy_v4i8_volatile_load(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load volatile <4 x i8> addrspace(1)* %in, align 4
store <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
; Restore missing block terminator and function close.
ret void
}
152 ; FUNC-LABEL: {{^}}test_copy_v4i8_volatile_store:
153 ; SI: BUFFER_LOAD_UBYTE
154 ; SI: BUFFER_LOAD_UBYTE
155 ; SI: BUFFER_LOAD_UBYTE
156 ; SI: BUFFER_LOAD_UBYTE
157 ; SI: BUFFER_STORE_BYTE
158 ; SI: BUFFER_STORE_BYTE
159 ; SI: BUFFER_STORE_BYTE
160 ; SI: BUFFER_STORE_BYTE
define void @test_copy_v4i8_volatile_store(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
%val = load <4 x i8> addrspace(1)* %in, align 4
store volatile <4 x i8> %val, <4 x i8> addrspace(1)* %out, align 4
; Restore missing block terminator and function close.
ret void
}