; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
; RUN: llc -O0 -march=amdgcn -mcpu=bonaire -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s
; RUN: llc -O0 -march=amdgcn -mcpu=tonga -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
; RUN: llc -O0 -march=amdgcn -mcpu=tonga -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s

; Disable optimizations in case optimizations are added that
; specialize away generic pointer accesses.

; These testcases might become useless when optimizations to
; remove generic pointers are added.

; CHECK-LABEL: {{^}}store_flat_i32:
; CHECK: v_mov_b32_e32 v[[DATA:[0-9]+]], {{s[0-9]+}}
; CHECK: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: v_mov_b32_e32 v[[HI_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: flat_store_dword v[[DATA]], v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
define void @store_flat_i32(i32 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  store i32 %x, i32 addrspace(4)* %fptr, align 4
  ret void
}

; CHECK-LABEL: {{^}}store_flat_i64:
; CHECK: flat_store_dwordx2
define void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  store i64 %x, i64 addrspace(4)* %fptr, align 8
  ret void
}

; CHECK-LABEL: {{^}}store_flat_v4i32:
; CHECK: flat_store_dwordx4
define void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  store <4 x i32> %x, <4 x i32> addrspace(4)* %fptr, align 16
  ret void
}

; CHECK-LABEL: {{^}}store_flat_trunc_i16:
; CHECK: flat_store_short
define void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %y = trunc i32 %x to i16
  store i16 %y, i16 addrspace(4)* %fptr, align 2
  ret void
}

; CHECK-LABEL: {{^}}store_flat_trunc_i8:
; CHECK: flat_store_byte
define void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %y = trunc i32 %x to i8
  store i8 %y, i8 addrspace(4)* %fptr, align 2
  ret void
}

; CHECK-LABEL: load_flat_i32:
; CHECK: flat_load_dword
define void @load_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  %fload = load i32, i32 addrspace(4)* %fptr, align 4
  store i32 %fload, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: load_flat_i64:
; CHECK: flat_load_dwordx2
define void @load_flat_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  %fload = load i64, i64 addrspace(4)* %fptr, align 4
  store i64 %fload, i64 addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: load_flat_v4i32:
; CHECK: flat_load_dwordx4
define void @load_flat_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  %fload = load <4 x i32>, <4 x i32> addrspace(4)* %fptr, align 4
  store <4 x i32> %fload, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: sextload_flat_i8:
; CHECK: flat_load_sbyte
define void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8, i8 addrspace(4)* %fptr, align 4
  %ext = sext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: zextload_flat_i8:
; CHECK: flat_load_ubyte
define void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8, i8 addrspace(4)* %fptr, align 4
  %ext = zext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: sextload_flat_i16:
; CHECK: flat_load_sshort
define void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16, i16 addrspace(4)* %fptr, align 4
  %ext = sext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: zextload_flat_i16:
; CHECK: flat_load_ushort
define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16, i16 addrspace(4)* %fptr, align 4
  %ext = zext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

declare void @llvm.AMDGPU.barrier.local() #1
declare i32 @llvm.r600.read.tidig.x() #3

attributes #0 = { nounwind }
attributes #1 = { nounwind noduplicate }
attributes #3 = { nounwind readnone }