; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s

; Test that doing a shift of a pointer with a constant add will be
; folded into the constant offset addressing mode even if the add has
; multiple uses. This is relevant to accessing 2 separate, adjacent
; LDS globals.

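; As a sketch, the combine being exercised is the generic rewrite
;   (shl (add %x, C1), C2) --> (add (shl %x, C2), (C1 << C2))
; so with C1 = 2 and C2 = 2, the resulting +8 can be absorbed into the DS
; instruction's immediate offset field instead of needing a separate v_add.
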
declare i32 @llvm.r600.read.tidig.x() #1

@lds0 = addrspace(3) global [512 x float] undef, align 4
@lds1 = addrspace(3) global [512 x float] undef, align 4

; Make sure the (add tid, 2) << 2 gets folded into the ds's offset as (tid << 2) + 8

; SI-LABEL: {{^}}load_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_read_b32 {{v[0-9]+}}, [[PTR]] offset:8 [M0]

define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
  %val0 = load float addrspace(3)* %arrayidx0, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  store float %val0, float addrspace(1)* %out
  ret void
}

; Make sure once the first use is folded into the addressing mode, the
; remaining add use goes through the normal shl + add constant fold.

; SI-LABEL: {{^}}load_shl_base_lds_1:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_read_b32 [[RESULT:v[0-9]+]], [[PTR]] offset:8 [M0]
; SI: v_add_i32_e32 [[ADDUSE:v[0-9]+]], 8, v{{[0-9]+}}
; SI-DAG: buffer_store_dword [[RESULT]]
; SI-DAG: buffer_store_dword [[ADDUSE]]

define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
  %val0 = load float addrspace(3)* %arrayidx0, align 4
  %shl_add_use = shl i32 %idx.0, 2
  store i32 %shl_add_use, i32 addrspace(1)* %add_use, align 4
  store float %val0, float addrspace(1)* %out
  ret void
}

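; Worked arithmetic for the checks above: the leftover use is
; shl (add %tid.x, 2), 2, which the generic constant fold rewrites as
; (shl %tid.x, 2) + 8, so the v_add_i32_e32 with immediate 8 can reuse the
; already-shifted value instead of materializing (add %tid.x, 2) separately.
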
@maxlds = addrspace(3) global [65536 x i8] undef, align 4

; SI-LABEL: {{^}}load_shl_base_lds_max_offset:
; SI: ds_read_u8 v{{[0-9]+}}, v{{[0-9]+}} offset:65535

define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %lds, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 65535
  %arrayidx0 = getelementptr inbounds [65536 x i8] addrspace(3)* @maxlds, i32 0, i32 %idx.0
  %val0 = load i8 addrspace(3)* %arrayidx0
  store i32 %idx.0, i32 addrspace(1)* %add_use
  store i8 %val0, i8 addrspace(1)* %out
  ret void
}

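; Note: 65535 is the largest value that fits in the DS instructions'
; 16-bit unsigned offset field, so this test sits exactly at the limit;
; a larger constant would have to stay in the address register.
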
; The two globals are placed adjacent in memory, so the same base
; pointer can be used with an offset into the second one.

; SI-LABEL: {{^}}load_shl_base_lds_2:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI-NEXT: ds_read2st64_b32 {{v\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:1 offset1:9 [M0]

define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 64
  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
  %val0 = load float addrspace(3)* %arrayidx0, align 4
  %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
  %val1 = load float addrspace(3)* %arrayidx1, align 4
  %sum = fadd float %val0, %val1
  store float %sum, float addrspace(1)* %out, align 4
  ret void
}

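; Offset arithmetic for the ds_read2st64_b32 above, whose offsets are scaled
; by 64 dwords (256 bytes): lds0[%tid.x + 64] lives at (%tid.x << 2) + 256
; bytes, i.e. offset0:1, and, assuming @lds1 is placed immediately after the
; 2048-byte @lds0, lds1[%tid.x + 64] lives at (%tid.x << 2) + 2304 bytes
; = 9 * 256, i.e. offset1:9.
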
; SI-LABEL: {{^}}store_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_write_b32 [[PTR]], {{v[0-9]+}} offset:8 [M0]

define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
  store float 1.0, float addrspace(3)* %arrayidx0, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; --------------------------------------------------------------------------------
; Atomics.
; --------------------------------------------------------------------------------

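; The same shl-of-add fold applies to the DS atomic instructions: each test
; below expects the shifted thread id to be used directly as the address,
; with the +2 element index (8 bytes) folded into the offset field.
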
@lds2 = addrspace(3) global [512 x i32] undef, align 4

; define void @atomic_load_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
;   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
;   %idx.0 = add nsw i32 %tid.x, 2
;   %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
;   %val = load atomic i32 addrspace(3)* %arrayidx0 seq_cst, align 4
;   store i32 %val, i32 addrspace(1)* %out, align 4
;   store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
;   ret void
; }

; SI-LABEL: {{^}}atomic_cmpxchg_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_cmpst_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, {{v[0-9]+}} offset:8

define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use, i32 %swap) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %pair = cmpxchg i32 addrspace(3)* %arrayidx0, i32 7, i32 %swap seq_cst monotonic
  %result = extractvalue { i32, i1 } %pair, 0
  store i32 %result, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_swap_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_wrxchg_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw xchg i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_add_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_add_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw add i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_sub_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_sub_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw sub i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_and_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_and_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw and i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_or_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_or_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw or i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_xor_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_xor_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw xor i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; define void @atomic_nand_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
;   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
;   %idx.0 = add nsw i32 %tid.x, 2
;   %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
;   %val = atomicrmw nand i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
;   store i32 %val, i32 addrspace(1)* %out, align 4
;   store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
;   ret void
; }

; SI-LABEL: {{^}}atomic_min_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_min_rtn_i32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw min i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_max_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_max_rtn_i32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw max i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_umin_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_min_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw umin i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_umax_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_max_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8

define void @atomic_umax_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw umax i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }