; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s

; Disable optimizations in case optimizations are added later that
; specialize away generic pointer accesses.
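;
; For example (an illustrative sketch, not something this test checks), an
; optimizer could rewrite the phi-of-addrspacecast pattern in
; @branch_use_flat_i32 below into per-address-space accesses:
;
;   local:
;     store i32 %x, i32 addrspace(3)* %lptr, align 4   ; becomes ds_write_b32
;     ...
;   global:
;     store i32 %x, i32 addrspace(1)* %gptr, align 4   ; becomes a buffer store
;
; in which case no flat_store_dword would be emitted and the CHECK lines
; would fail.
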
; CHECK-LABEL: {{^}}branch_use_flat_i32:
; CHECK: flat_store_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, [M0, FLAT_SCRATCH]
define void @branch_use_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %gptr, i32 addrspace(3)* %lptr, i32 %x, i32 %c) #0 {
entry:
  %cmp = icmp ne i32 %c, 0
  br i1 %cmp, label %local, label %global

local:
  %flat_local = addrspacecast i32 addrspace(3)* %lptr to i32 addrspace(4)*
  br label %end

global:
  %flat_global = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  br label %end

end:
  %fptr = phi i32 addrspace(4)* [ %flat_local, %local ], [ %flat_global, %global ]
  store i32 %x, i32 addrspace(4)* %fptr, align 4
;  %val = load i32 addrspace(4)* %fptr, align 4
;  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; These test cases may become useless once optimizations that remove
; generic pointers are added.
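;
; For example (sketch only), a pass that infers address spaces could fold
; the cast in @store_flat_i32 into the access:
;
;   %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
;   store i32 %x, i32 addrspace(4)* %fptr, align 4
;
; would become a direct global store with no flat instruction at all:
;
;   store i32 %x, i32 addrspace(1)* %gptr, align 4
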
; CHECK-LABEL: {{^}}store_flat_i32:
; CHECK: v_mov_b32_e32 v[[DATA:[0-9]+]], {{s[0-9]+}}
; CHECK: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: flat_store_dword v[[DATA]], v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
define void @store_flat_i32(i32 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  store i32 %x, i32 addrspace(4)* %fptr, align 4
  ret void
}

; CHECK-LABEL: {{^}}store_flat_i64:
; CHECK: flat_store_dwordx2
define void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  store i64 %x, i64 addrspace(4)* %fptr, align 8
  ret void
}

; CHECK-LABEL: {{^}}store_flat_v4i32:
; CHECK: flat_store_dwordx4
define void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  store <4 x i32> %x, <4 x i32> addrspace(4)* %fptr, align 16
  ret void
}

; CHECK-LABEL: {{^}}store_flat_trunc_i16:
; CHECK: flat_store_short
define void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %y = trunc i32 %x to i16
  store i16 %y, i16 addrspace(4)* %fptr, align 2
  ret void
}

; CHECK-LABEL: {{^}}store_flat_trunc_i8:
; CHECK: flat_store_byte
define void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %y = trunc i32 %x to i8
  store i8 %y, i8 addrspace(4)* %fptr, align 2
  ret void
}

; CHECK-LABEL: {{^}}load_flat_i32:
; CHECK: flat_load_dword
define void @load_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  %fload = load i32 addrspace(4)* %fptr, align 4
  store i32 %fload, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}load_flat_i64:
; CHECK: flat_load_dwordx2
define void @load_flat_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  %fload = load i64 addrspace(4)* %fptr, align 4
  store i64 %fload, i64 addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: {{^}}load_flat_v4i32:
; CHECK: flat_load_dwordx4
define void @load_flat_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  %fload = load <4 x i32> addrspace(4)* %fptr, align 4
  store <4 x i32> %fload, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: {{^}}sextload_flat_i8:
; CHECK: flat_load_sbyte
define void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8 addrspace(4)* %fptr, align 4
  %ext = sext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}zextload_flat_i8:
; CHECK: flat_load_ubyte
define void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8 addrspace(4)* %fptr, align 4
  %ext = zext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}sextload_flat_i16:
; CHECK: flat_load_sshort
define void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16 addrspace(4)* %fptr, align 4
  %ext = sext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}zextload_flat_i16:
; CHECK: flat_load_ushort
define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16 addrspace(4)* %fptr, align 4
  %ext = zext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; TODO: This should not be zero when registers are used for small
; scratch allocations again.

; Check for prologue initializing special SGPRs pointing to scratch.
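;
; With promote-alloca disabled, the alloca of 9 i32s in @store_flat_scratch
; stays in scratch, so a nonzero value is written to flat_scratch_hi
; (0x28 = 40 bytes, presumably the 36-byte alloca rounded up). With
; promote-alloca enabled, the alloca is promoted to LDS and no scratch is
; needed, hence 0x0.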
; CHECK-LABEL: {{^}}store_flat_scratch:
; CHECK: s_movk_i32 flat_scratch_lo, 0
; CHECK-NO-PROMOTE: s_movk_i32 flat_scratch_hi, 0x28{{$}}
; CHECK-PROMOTE: s_movk_i32 flat_scratch_hi, 0x0{{$}}
; CHECK: flat_store_dword
; CHECK: s_barrier
; CHECK: flat_load_dword
define void @store_flat_scratch(i32 addrspace(1)* noalias %out, i32) #0 {
  %alloca = alloca i32, i32 9, align 4
  %x = call i32 @llvm.r600.read.tidig.x() #3
  %pptr = getelementptr i32* %alloca, i32 %x
  %fptr = addrspacecast i32* %pptr to i32 addrspace(4)*
  store i32 %x, i32 addrspace(4)* %fptr

  call void @llvm.AMDGPU.barrier.local() #1
  %reload = load i32 addrspace(4)* %fptr, align 4
  store i32 %reload, i32 addrspace(1)* %out, align 4
  ret void
}

declare void @llvm.AMDGPU.barrier.local() #1
declare i32 @llvm.r600.read.tidig.x() #3

attributes #0 = { nounwind }
attributes #1 = { nounwind noduplicate }
attributes #3 = { nounwind readnone }