; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI %s
; Swap two <2 x double> values through global memory. Because %x and %y
; may alias, the scheduler must not reorder the second load past the
; first store: both loads have to complete before either store issues.
; Each 16-byte vector is one dwordx4 access, so we expect exactly
; load, load, store, store in that order.
; SI-LABEL: {{^}}no_reorder_v2f64_global_load_store:
; SI: buffer_load_dwordx4
; SI: buffer_load_dwordx4
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
define void @no_reorder_v2f64_global_load_store(<2 x double> addrspace(1)* nocapture %x, <2 x double> addrspace(1)* nocapture %y) nounwind {
  %tmp1 = load <2 x double>, <2 x double> addrspace(1)* %x, align 16
  %tmp4 = load <2 x double>, <2 x double> addrspace(1)* %y, align 16
  store <2 x double> %tmp4, <2 x double> addrspace(1)* %x, align 16
  store <2 x double> %tmp1, <2 x double> addrspace(1)* %y, align 16
  ret void
}
; Same swap pattern as above, but through LDS (addrspace(3)), where the
; <2 x double> accesses get scalarized/split into ds_* operations. The
; potential aliasing of %x and %y still forbids reordering loads past
; the stores.
; NOTE(review): the body-check directives for this function (the ; SI:
; lines between the label and the define) appear to have been dropped
; from this copy of the file — restore them from upstream; only the
; label check survives here.
; SI-LABEL: {{^}}no_reorder_scalarized_v2f64_local_load_store:
define void @no_reorder_scalarized_v2f64_local_load_store(<2 x double> addrspace(3)* nocapture %x, <2 x double> addrspace(3)* nocapture %y) nounwind {
  %tmp1 = load <2 x double>, <2 x double> addrspace(3)* %x, align 16
  %tmp4 = load <2 x double>, <2 x double> addrspace(3)* %y, align 16
  store <2 x double> %tmp4, <2 x double> addrspace(3)* %x, align 16
  store <2 x double> %tmp1, <2 x double> addrspace(3)* %y, align 16
  ret void
}
; Swap two <8 x i32> values through possibly-aliasing global pointers.
; A 32-byte vector is split into two dwordx4 accesses, so all four
; loads (2 per vector) must be emitted before any of the four stores.
; SI-LABEL: {{^}}no_reorder_split_v8i32_global_load_store:
; SI: buffer_load_dwordx4
; SI: buffer_load_dwordx4
; SI: buffer_load_dwordx4
; SI: buffer_load_dwordx4
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
; SI: buffer_store_dwordx4
define void @no_reorder_split_v8i32_global_load_store(<8 x i32> addrspace(1)* nocapture %x, <8 x i32> addrspace(1)* nocapture %y) nounwind {
  %tmp1 = load <8 x i32>, <8 x i32> addrspace(1)* %x, align 32
  %tmp4 = load <8 x i32>, <8 x i32> addrspace(1)* %y, align 32
  store <8 x i32> %tmp4, <8 x i32> addrspace(1)* %x, align 32
  store <8 x i32> %tmp1, <8 x i32> addrspace(1)* %y, align 32
  ret void
}
; Load two <2 x i32> values from possibly-aliasing LDS pointers, widen
; each to <2 x i64>, add 1 per lane, truncate back, and store each
; result through the OTHER pointer — again a swap pattern, so the
; second load must not be reordered past the first store.
; NOTE(review): this copy is truncated — the body-check ; SI: lines
; (embedded numbering jumps 52 -> 59) and the function's closing
; "ret void" / "}" are missing here; the definition runs past the end
; of this excerpt. Restore the tail from upstream before using.
52 ; SI-LABEL: {{^}}no_reorder_extload_64:
59 define void @no_reorder_extload_64(<2 x i32> addrspace(3)* nocapture %x, <2 x i32> addrspace(3)* nocapture %y) nounwind {
60 %tmp1 = load <2 x i32>, <2 x i32> addrspace(3)* %x, align 8
61 %tmp4 = load <2 x i32>, <2 x i32> addrspace(3)* %y, align 8
62 %tmp1ext = zext <2 x i32> %tmp1 to <2 x i64>
63 %tmp4ext = zext <2 x i32> %tmp4 to <2 x i64>
64 %tmp7 = add <2 x i64> %tmp1ext, <i64 1, i64 1>
65 %tmp9 = add <2 x i64> %tmp4ext, <i64 1, i64 1>
66 %trunctmp9 = trunc <2 x i64> %tmp9 to <2 x i32>
67 %trunctmp7 = trunc <2 x i64> %tmp7 to <2 x i32>
68 store <2 x i32> %trunctmp9, <2 x i32> addrspace(3)* %x, align 8
69 store <2 x i32> %trunctmp7, <2 x i32> addrspace(3)* %y, align 8