1 ; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
3 declare i32 @llvm.r600.read.tidig.x() #0
4 declare i32 @llvm.r600.read.tidig.y() #0
6 ; In this test both the pointer and the offset operands to the
7 ; BUFFER_LOAD instructions end up being stored in vgprs. This
8 ; requires us to add the pointer and offset together, store the
9 ; result in the offset operand (vaddr), and then store 0 in an
10 ; sgpr register pair and use that for the pointer operand
11 ; (low 64-bits of srsrc).
13 ; GCN-LABEL: {{^}}mubuf:
15 ; Make sure we aren't using VGPRs for the source operand of s_mov_b64
16 ; GCN-NOT: s_mov_b64 s[{{[0-9]+:[0-9]+}}], v
18 ; Make sure we aren't using VGPRs for the srsrc operand of BUFFER_LOAD_*
20 ; GCN: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
21 ; GCN: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; Both the buffer pointer and the byte offset below depend on the
; workitem id, so both end up in VGPRs — exercising the lowering
; described in the header comment (add ptr+offset into vaddr, zero the
; low 64 bits of srsrc). The CHECK lines above verify srsrc stays SGPR.
23 define void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
; Per-thread ids, sign-extended for 64-bit address arithmetic.
; NOTE(review): %tmp3 (tid.y) has no use in the lines visible here.
25 %tmp = call i32 @llvm.r600.read.tidig.x()
26 %tmp1 = call i32 @llvm.r600.read.tidig.y()
27 %tmp2 = sext i32 %tmp to i64
28 %tmp3 = sext i32 %tmp1 to i64
31 loop: ; preds = %loop, %entry
; Running offset: starts at 0 and grows by the thread's id (%tmp2)
; every iteration.
32 %tmp4 = phi i64 [ 0, %entry ], [ %tmp5, %loop ]
33 %tmp5 = add i64 %tmp2, %tmp4
; Load the byte at %in + %tmp5 ...
34 %tmp6 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp5
35 %tmp7 = load i8, i8 addrspace(1)* %tmp6, align 1
; ... and the byte at the adjacent odd address (%tmp5 | 1) — the two
; buffer_load_ubyte instructions matched by the checks above.
36 %tmp8 = or i64 %tmp5, 1
37 %tmp9 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp8
38 %tmp10 = load i8, i8 addrspace(1)* %tmp9, align 1
; Sum the two bytes, sign-extend to i32, and store the result.
39 %tmp11 = add i8 %tmp7, %tmp10
40 %tmp12 = sext i8 %tmp11 to i32
41 store i32 %tmp12, i32 addrspace(1)* %out
; Iterate while the accumulated offset is still below 10 (signed).
42 %tmp13 = icmp slt i64 %tmp5, 10
43 br i1 %tmp13, label %loop, label %done
; NOTE(review): the 'entry:' block terminator, the 'done:' block, and
; the closing '}' are not visible in this excerpt — confirm against the
; full test file.
49 ; Test moving an SMRD instruction to the VALU
51 ; GCN-LABEL: {{^}}smrd_valu:
52 ; GCN: buffer_load_dword [[OUT:v[0-9]+]]
53 ; GCN: buffer_store_dword [[OUT]]
; The constant-address-space pointer reaches the final load through a
; phi merging two loads, so its value lives in VGPRs; the SMRD must be
; moved to the VALU (checked above as buffer_load_dword/store_dword).
54 define void @smrd_valu(i32 addrspace(2)* addrspace(1)* %in, i32 %a, i32 %b, i32 addrspace(1)* %out) #1 {
; Branch on %a so each side of the diamond loads the pointer itself.
56 %tmp = icmp ne i32 %a, 0
57 br i1 %tmp, label %if, label %else
; %if side: load the addrspace(2) pointer directly from %in.
60 %tmp1 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
63 else: ; preds = %entry
; GEP with no index operands — presumably equivalent to %in itself, so
; this side mirrors the %if load; verify against the full file.
64 %tmp2 = getelementptr i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
65 %tmp3 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %tmp2
68 endif: ; preds = %else, %if
; Merge the two pointers, then load i32 element 3000 and store it out.
69 %tmp4 = phi i32 addrspace(2)* [ %tmp1, %if ], [ %tmp3, %else ]
70 %tmp5 = getelementptr i32, i32 addrspace(2)* %tmp4, i32 3000
71 %tmp6 = load i32, i32 addrspace(2)* %tmp5
72 store i32 %tmp6, i32 addrspace(1)* %out
; NOTE(review): the 'entry:'/'if:' labels, block terminators, and the
; closing '}' are not visible in this excerpt — confirm against the
; full test file.
76 ; Test moving an SMRD with an immediate offset to the VALU
78 ; GCN-LABEL: {{^}}smrd_valu2:
79 ; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; SMRD whose address is tid-dependent (base index %tmp) plus a constant
; element index; moving it to the VALU should produce a buffer_load
; with addr64 and an immediate 'offset:16' (checked above).
80 define void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in) #1 {
82 %tmp = call i32 @llvm.r600.read.tidig.x() #0
; NOTE(review): %tmp1 has no use in the lines visible here.
83 %tmp1 = add i32 %tmp, 4
; Address = &%in[%tmp][4]; element 4 of i32 => constant byte offset 16.
84 %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %tmp, i32 4
85 %tmp3 = load i32, i32 addrspace(2)* %tmp2
86 store i32 %tmp3, i32 addrspace(1)* %out
; NOTE(review): the 'entry:' label, 'ret void', and closing '}' are not
; visible in this excerpt.
90 ; GCN-LABEL: {{^}}smrd_valu2_salu_user:
91 ; GCN: buffer_load_dword [[MOVED:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
92 ; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, s{{[0-9]+}}, [[MOVED]]
93 ; GCN: buffer_store_dword [[ADD]]
; Same tid-dependent SMRD as smrd_valu2, but the loaded value feeds an
; add with the scalar argument %a — the checks above expect the moved
; load followed by v_add_i32_e32 with an SGPR source operand.
94 define void @smrd_valu2_salu_user(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in, i32 %a) #1 {
96 %tmp = call i32 @llvm.r600.read.tidig.x() #0
; NOTE(review): %tmp1 has no use in the lines visible here.
97 %tmp1 = add i32 %tmp, 4
; Address = &%in[%tmp][4]; element 4 of i32 => constant byte offset 16.
98 %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %tmp, i32 4
99 %tmp3 = load i32, i32 addrspace(2)* %tmp2
; SALU-style user of the VALU-moved load result.
100 %tmp4 = add i32 %tmp3, %a
101 store i32 %tmp4, i32 addrspace(1)* %out
; NOTE(review): the 'entry:' label, 'ret void', and closing '}' are not
; visible in this excerpt.
105 ; GCN-LABEL: {{^}}smrd_valu2_max_smrd_offset:
106 ; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1020{{$}}
; Constant element index 255 => byte offset 255*4 = 1020, which the
; check above expects to be encoded as the immediate 'offset:1020' on
; the VALU-moved buffer_load.
107 define void @smrd_valu2_max_smrd_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
109 %tmp = call i32 @llvm.r600.read.tidig.x() #0
; NOTE(review): %tmp1 has no use in the lines visible here.
110 %tmp1 = add i32 %tmp, 4
; Address = &%in[%tmp][255] => constant byte offset 1020.
111 %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(2)* %in, i32 %tmp, i32 255
112 %tmp3 = load i32, i32 addrspace(2)* %tmp2
113 store i32 %tmp3, i32 addrspace(1)* %out
; NOTE(review): the 'entry:' label, 'ret void', and closing '}' are not
; visible in this excerpt.
117 ; Offset is too big to fit in SMRD 8-bit offset, but small enough to
118 ; fit in MUBUF offset.
119 ; FIXME: We should be using the offset but we don't
121 ; GCN-LABEL: {{^}}smrd_valu2_mubuf_offset:
122 ; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; Constant element index 256 => byte offset 1024 — too big for the
; SMRD 8-bit offset (see the comment above); the check expects addr64
; with NO immediate offset (the FIXME above notes the missed MUBUF
; offset encoding).
123 define void @smrd_valu2_mubuf_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
125 %tmp = call i32 @llvm.r600.read.tidig.x() #0
; NOTE(review): %tmp1 has no use in the lines visible here.
126 %tmp1 = add i32 %tmp, 4
; Address = &%in[%tmp][256] => constant byte offset 1024.
127 %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(2)* %in, i32 %tmp, i32 256
128 %tmp3 = load i32, i32 addrspace(2)* %tmp2
129 store i32 %tmp3, i32 addrspace(1)* %out
; NOTE(review): the 'entry:' label, 'ret void', and closing '}' are not
; visible in this excerpt.
133 ; GCN-LABEL: {{^}}s_load_imm_v8i32:
134 ; GCN: buffer_load_dwordx4
135 ; GCN: buffer_load_dwordx4
; A 256-bit (<8 x i32>) load from a tid-dependent constant address;
; the checks above expect it to be moved to the VALU and split into
; two buffer_load_dwordx4 instructions.
136 define void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
138 %tmp0 = tail call i32 @llvm.r600.read.tidig.x()
; Per-thread element pointer, reinterpreted as a vector pointer.
139 %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
140 %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
141 %tmp3 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp2, align 4
142 store <8 x i32> %tmp3, <8 x i32> addrspace(1)* %out, align 32
; NOTE(review): the 'entry:' label, 'ret void', and closing '}' are not
; visible in this excerpt.
146 ; GCN-LABEL: {{^}}s_load_imm_v8i32_salu_user:
147 ; GCN: buffer_load_dwordx4
148 ; GCN: buffer_load_dwordx4
156 ; GCN: buffer_store_dword
; Same split <8 x i32> load as s_load_imm_v8i32, but every element is
; consumed by a chain of scalar adds whose final result is stored
; (buffer_store_dword per the check above).
157 define void @s_load_imm_v8i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
159 %tmp0 = tail call i32 @llvm.r600.read.tidig.x()
; Per-thread element pointer, reinterpreted as a vector pointer.
160 %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
161 %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
162 %tmp3 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp2, align 4
; Extract all eight lanes ...
164 %elt0 = extractelement <8 x i32> %tmp3, i32 0
165 %elt1 = extractelement <8 x i32> %tmp3, i32 1
166 %elt2 = extractelement <8 x i32> %tmp3, i32 2
167 %elt3 = extractelement <8 x i32> %tmp3, i32 3
168 %elt4 = extractelement <8 x i32> %tmp3, i32 4
169 %elt5 = extractelement <8 x i32> %tmp3, i32 5
170 %elt6 = extractelement <8 x i32> %tmp3, i32 6
171 %elt7 = extractelement <8 x i32> %tmp3, i32 7
; ... and reduce them with a serial add chain.
173 %add0 = add i32 %elt0, %elt1
174 %add1 = add i32 %add0, %elt2
175 %add2 = add i32 %add1, %elt3
176 %add3 = add i32 %add2, %elt4
177 %add4 = add i32 %add3, %elt5
178 %add5 = add i32 %add4, %elt6
179 %add6 = add i32 %add5, %elt7
181 store i32 %add6, i32 addrspace(1)* %out
; NOTE(review): the 'entry:' label, 'ret void', and closing '}' are not
; visible in this excerpt.
185 ; GCN-LABEL: {{^}}s_load_imm_v16i32:
186 ; GCN: buffer_load_dwordx4
187 ; GCN: buffer_load_dwordx4
188 ; GCN: buffer_load_dwordx4
189 ; GCN: buffer_load_dwordx4
; A 512-bit (<16 x i32>) load from a tid-dependent constant address;
; the checks above expect it to be moved to the VALU and split into
; four buffer_load_dwordx4 instructions.
190 define void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
192 %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
; Per-thread element pointer, reinterpreted as a vector pointer.
193 %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
194 %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
195 %tmp3 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp2, align 4
196 store <16 x i32> %tmp3, <16 x i32> addrspace(1)* %out, align 32
; NOTE(review): the 'entry:' label, 'ret void', and closing '}' are not
; visible in this excerpt.
200 ; GCN-LABEL: {{^}}s_load_imm_v16i32_salu_user:
201 ; GCN: buffer_load_dwordx4
202 ; GCN: buffer_load_dwordx4
203 ; GCN: buffer_load_dwordx4
204 ; GCN: buffer_load_dwordx4
220 ; GCN: buffer_store_dword
; Same split <16 x i32> load as s_load_imm_v16i32, but every element is
; consumed by a chain of scalar adds whose final result is stored
; (buffer_store_dword per the check above).
221 define void @s_load_imm_v16i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
223 %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
; Per-thread element pointer, reinterpreted as a vector pointer.
224 %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
225 %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
226 %tmp3 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp2, align 4
; Extract all sixteen lanes ...
228 %elt0 = extractelement <16 x i32> %tmp3, i32 0
229 %elt1 = extractelement <16 x i32> %tmp3, i32 1
230 %elt2 = extractelement <16 x i32> %tmp3, i32 2
231 %elt3 = extractelement <16 x i32> %tmp3, i32 3
232 %elt4 = extractelement <16 x i32> %tmp3, i32 4
233 %elt5 = extractelement <16 x i32> %tmp3, i32 5
234 %elt6 = extractelement <16 x i32> %tmp3, i32 6
235 %elt7 = extractelement <16 x i32> %tmp3, i32 7
236 %elt8 = extractelement <16 x i32> %tmp3, i32 8
237 %elt9 = extractelement <16 x i32> %tmp3, i32 9
238 %elt10 = extractelement <16 x i32> %tmp3, i32 10
239 %elt11 = extractelement <16 x i32> %tmp3, i32 11
240 %elt12 = extractelement <16 x i32> %tmp3, i32 12
241 %elt13 = extractelement <16 x i32> %tmp3, i32 13
242 %elt14 = extractelement <16 x i32> %tmp3, i32 14
243 %elt15 = extractelement <16 x i32> %tmp3, i32 15
; ... and reduce them with a serial add chain.
245 %add0 = add i32 %elt0, %elt1
246 %add1 = add i32 %add0, %elt2
247 %add2 = add i32 %add1, %elt3
248 %add3 = add i32 %add2, %elt4
249 %add4 = add i32 %add3, %elt5
250 %add5 = add i32 %add4, %elt6
251 %add6 = add i32 %add5, %elt7
252 %add7 = add i32 %add6, %elt8
253 %add8 = add i32 %add7, %elt9
254 %add9 = add i32 %add8, %elt10
255 %add10 = add i32 %add9, %elt11
256 %add11 = add i32 %add10, %elt12
257 %add12 = add i32 %add11, %elt13
258 %add13 = add i32 %add12, %elt14
259 %add14 = add i32 %add13, %elt15
261 store i32 %add14, i32 addrspace(1)* %out
; NOTE(review): the 'entry:' label, 'ret void', and closing '}' are not
; visible in this excerpt.
265 attributes #0 = { nounwind readnone }
266 attributes #1 = { nounwind }