; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -check-prefix=FUNC -check-prefix=CI %s

; Intrinsics used by the tests below (the tbuffer ones only by the
; commented-out test at the end of the file).
declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
declare void @llvm.AMDGPU.barrier.local() #2

; LDS slots holding pointers into the local (3), constant (2) and global (1)
; address spaces.  Loading a pointer out of LDS gives the scheduler no static
; aliasing information, so reordering must rely on the address spaces alone.
@stored_lds_ptr = addrspace(3) global i32 addrspace(3)* undef, align 4
@stored_constant_ptr = addrspace(3) global i32 addrspace(2)* undef, align 8
@stored_global_ptr = addrspace(3) global i32 addrspace(1)* undef, align 8
; A global store cannot alias LDS loads, so the scheduler may move the store
; past the second local load: both ds_read instructions are expected back to
; back, ahead of the buffer_store.
; FUNC-LABEL: @reorder_local_load_global_store_local_load
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI-NEXT: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8
; CI: buffer_store_dword
define void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4    ; global store between the two LDS loads
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}
; Same shape as above, but the intervening global store is volatile, so it
; must not be moved: the second ds_read stays after the buffer_store.
; FUNC-LABEL: @no_reorder_local_load_volatile_global_store_local_load
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: buffer_store_dword
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8
define void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store volatile i32 99, i32 addrspace(1)* %gptr, align 4    ; volatile: pins the ordering
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}
; A barrier sits between the global store and the second LDS load.
; NOTE(review): despite the "no_reorder" name, the CHECK lines below expect
; both ds_read instructions ahead of the buffer_store — presumably the barrier
; only constrains the LDS accesses, not the global store; confirm against the
; scheduler's handling of @llvm.AMDGPU.barrier.local.
; FUNC-LABEL: @no_reorder_barrier_local_load_global_store_local_load
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:8
; CI: buffer_store_dword
define void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4
  call void @llvm.AMDGPU.barrier.local() #2
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}
; Technically we could reorder these, but just comparing the
; instruction type of the load is insufficient.
; (The constant-pointer base itself was loaded through LDS, so the constant
; loads here are lowered to buffer_load rather than scalar loads.)
;
; FUNC-LABEL: @no_reorder_constant_load_global_store_constant_load
; CI: buffer_load_dword
; CI: buffer_store_dword
; CI: buffer_load_dword
; CI: buffer_store_dword
define void @no_reorder_constant_load_global_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8

  %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2

  %tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4    ; global store that might alias the constant loads' lowering
  %tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}
; An LDS store cannot alias constant-address-space loads, so the two
; buffer_loads may be scheduled together before the store.
; FUNC-LABEL: @reorder_constant_load_local_store_constant_load
; CI: buffer_load_dword
; CI: buffer_load_dword
; CI: buffer_store_dword
define void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
  %ptr0 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8

  %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2

  %tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4    ; local store between the constant loads
  %tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}
; Constant loads from a uniform (SGPR) base are SMRD loads; an LDS store
; cannot alias them, so they may be scheduled together.
; NOTE(review): CHECK lines appear to have been lost between the FUNC-LABEL
; and the buffer_store check here — verify against the upstream test.
; FUNC-LABEL: @reorder_smrd_load_local_store_smrd_load
; CI: buffer_store_dword
define void @reorder_smrd_load_local_store_smrd_load(i32 addrspace(1)* %out, i32 addrspace(3)* noalias %lptr, i32 addrspace(2)* %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2

  %tmp1 = load i32, i32 addrspace(2)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4    ; local store between the SMRD loads
  %tmp2 = load i32, i32 addrspace(2)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}
; An LDS store cannot alias global loads, so the two buffer_loads may be
; scheduled together before the store.
; FUNC-LABEL: @reorder_global_load_local_store_global_load
; CI: buffer_load_dword
; CI: buffer_load_dword
; CI: buffer_store_dword
define void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr, i32 addrspace(1)* %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 2

  %tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4    ; local store between the global loads
  %tmp2 = load i32, i32 addrspace(1)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}
; Accesses to the same LDS base at provably distinct offsets (12, 400, 404)
; may be reordered with respect to each other; the checks pin the expected
; schedule: both reads of 400/404, then the three writes.
; FUNC-LABEL: @reorder_local_offsets
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:400
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:404
; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:400
; CI: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:404
; CI: buffer_store_dword
define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 100
  %ptr3 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 101

  store i32 123, i32 addrspace(3)* %ptr1, align 4
  %tmp1 = load i32, i32 addrspace(3)* %ptr2, align 4
  %tmp2 = load i32, i32 addrspace(3)* %ptr3, align 4
  store i32 123, i32 addrspace(3)* %ptr2, align 4
  %tmp3 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 789, i32 addrspace(3)* %ptr3, align 4

  %add.0 = add nsw i32 %tmp2, %tmp1
  %add.1 = add nsw i32 %add.0, %tmp3
  store i32 %add.1, i32 addrspace(1)* %out, align 4
  ret void
}
; Same pattern as @reorder_local_offsets but through the global address space
; (noalias base): buffer accesses at distinct offsets may be reordered.
; FUNC-LABEL: @reorder_global_offsets
; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:404
; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
; CI: buffer_load_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
; CI: buffer_store_dword {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:404
; CI: buffer_store_dword
define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 100
  %ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 101

  store i32 123, i32 addrspace(1)* %ptr1, align 4
  %tmp1 = load i32, i32 addrspace(1)* %ptr2, align 4
  %tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
  store i32 123, i32 addrspace(1)* %ptr2, align 4
  %tmp3 = load i32, i32 addrspace(1)* %ptr1, align 4
  store i32 789, i32 addrspace(1)* %ptr3, align 4

  %add.0 = add nsw i32 %tmp2, %tmp1
  %add.1 = add nsw i32 %add.0, %tmp3
  store i32 %add.1, i32 addrspace(1)* %out, align 4
  ret void
}
; Disabled test (X-prefixed checks, definition commented out): reordering an
; LDS load past a tbuffer store.
; XFUNC-LABEL: @reorder_local_load_tbuffer_store_local_load
; XCI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}}, 0x4
; XCI: TBUFFER_STORE_FORMAT
; XCI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}}, 0x8
; define void @reorder_local_load_tbuffer_store_local_load(i32 addrspace(1)* %out, i32 %a1, i32 %vaddr) #1 {
;   %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
;
;   %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
;   %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
;
;   %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
;
;   %vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
;   call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
;        i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 1,
;        i32 1, i32 0)
;
;   %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
;
;   %add = add nsw i32 %tmp1, %tmp2
;
;   store i32 %add, i32 addrspace(1)* %out, align 4
;   ret void
; }
; #0: default kernel attributes; #1: shader (ShaderType=1) variant for the
; disabled tbuffer test; #2: barrier intrinsic attributes.
attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
attributes #1 = { "ShaderType"="1" nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
attributes #2 = { nounwind noduplicate }