1 ; RUN: llc -march=r600 -mcpu=SI -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-ALLOCA -check-prefix=SI %s
2 ; RUN: llc -march=r600 -mcpu=SI -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=SI-PROMOTE -check-prefix=SI %s
5 declare void @llvm.AMDGPU.barrier.local() noduplicate nounwind
; Check lowering of an f64 store/load through a private (scratch) alloca:
; with promote-alloca disabled the access stays in scratch and must use a
; single 64-bit buffer access (DWORDX2); with promote-alloca enabled the
; array is moved into LDS and must use 64-bit DS ops instead.
; NOTE(review): the noduplicate barrier presumably blocks store-to-load
; forwarding so the memory instructions are actually emitted — confirm.
7 ; SI-LABEL: @private_access_f64_alloca:
9 ; SI-ALLOCA: BUFFER_STORE_DWORDX2
10 ; SI-ALLOCA: BUFFER_LOAD_DWORDX2
12 ; SI-PROMOTE: DS_WRITE_B64
13 ; SI-PROMOTE: DS_READ_B64
14 define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
15 %val = load double addrspace(1)* %in, align 8
16 %array = alloca double, i32 16, align 8
; Dynamic index %b keeps the element address non-constant.
17 %ptr = getelementptr double* %array, i32 %b
18 store double %val, double* %ptr, align 8
19 call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
20 %result = load double* %ptr, align 8
21 store double %result, double addrspace(1)* %out, align 8
; Same test for a 16-byte <2 x double> element: scratch form uses a single
; 128-bit buffer access (DWORDX4), while the LDS form is expected to be
; split into four 32-bit DS writes and four 32-bit DS reads.
25 ; SI-LABEL: @private_access_v2f64_alloca:
27 ; SI-ALLOCA: BUFFER_STORE_DWORDX4
28 ; SI-ALLOCA: BUFFER_LOAD_DWORDX4
30 ; SI-PROMOTE: DS_WRITE_B32
31 ; SI-PROMOTE: DS_WRITE_B32
32 ; SI-PROMOTE: DS_WRITE_B32
33 ; SI-PROMOTE: DS_WRITE_B32
34 ; SI-PROMOTE: DS_READ_B32
35 ; SI-PROMOTE: DS_READ_B32
36 ; SI-PROMOTE: DS_READ_B32
37 ; SI-PROMOTE: DS_READ_B32
38 define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
39 %val = load <2 x double> addrspace(1)* %in, align 16
40 %array = alloca <2 x double>, i32 16, align 16
; Dynamic index %b keeps the element address non-constant.
41 %ptr = getelementptr <2 x double>* %array, i32 %b
42 store <2 x double> %val, <2 x double>* %ptr, align 16
43 call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
44 %result = load <2 x double>* %ptr, align 16
45 store <2 x double> %result, <2 x double> addrspace(1)* %out, align 16
; Integer twin of the f64 case: an i64 element through a private alloca
; must lower to one 64-bit buffer access in scratch, or one 64-bit DS
; access when promote-alloca moves the array into LDS.
49 ; SI-LABEL: @private_access_i64_alloca:
51 ; SI-ALLOCA: BUFFER_STORE_DWORDX2
52 ; SI-ALLOCA: BUFFER_LOAD_DWORDX2
54 ; SI-PROMOTE: DS_WRITE_B64
55 ; SI-PROMOTE: DS_READ_B64
56 define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
57 %val = load i64 addrspace(1)* %in, align 8
58 %array = alloca i64, i32 16, align 8
; Dynamic index %b keeps the element address non-constant.
59 %ptr = getelementptr i64* %array, i32 %b
60 store i64 %val, i64* %ptr, align 8
61 call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
62 %result = load i64* %ptr, align 8
63 store i64 %result, i64 addrspace(1)* %out, align 8
; Integer twin of the v2f64 case: a 16-byte <2 x i64> element uses a single
; 128-bit buffer access (DWORDX4) in scratch, but is expected to split into
; four 32-bit DS writes and four 32-bit DS reads when promoted to LDS.
67 ; SI-LABEL: @private_access_v2i64_alloca:
69 ; SI-ALLOCA: BUFFER_STORE_DWORDX4
70 ; SI-ALLOCA: BUFFER_LOAD_DWORDX4
72 ; SI-PROMOTE: DS_WRITE_B32
73 ; SI-PROMOTE: DS_WRITE_B32
74 ; SI-PROMOTE: DS_WRITE_B32
75 ; SI-PROMOTE: DS_WRITE_B32
76 ; SI-PROMOTE: DS_READ_B32
77 ; SI-PROMOTE: DS_READ_B32
78 ; SI-PROMOTE: DS_READ_B32
79 ; SI-PROMOTE: DS_READ_B32
80 define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
81 %val = load <2 x i64> addrspace(1)* %in, align 16
82 %array = alloca <2 x i64>, i32 16, align 16
; Dynamic index %b keeps the element address non-constant.
83 %ptr = getelementptr <2 x i64>* %array, i32 %b
84 store <2 x i64> %val, <2 x i64>* %ptr, align 16
85 call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
86 %result = load <2 x i64>* %ptr, align 16
87 store <2 x i64> %result, <2 x i64> addrspace(1)* %out, align 16