; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s

@vda = common global <2 x double> zeroinitializer, align 16
@vdb = common global <2 x double> zeroinitializer, align 16
@vdr = common global <2 x double> zeroinitializer, align 16
@vfa = common global <4 x float> zeroinitializer, align 16
@vfb = common global <4 x float> zeroinitializer, align 16
@vfr = common global <4 x float> zeroinitializer, align 16
@vbllr = common global <2 x i64> zeroinitializer, align 16
@vbir = common global <4 x i32> zeroinitializer, align 16
@vblla = common global <2 x i64> zeroinitializer, align 16
@vbllb = common global <2 x i64> zeroinitializer, align 16
@vbia = common global <4 x i32> zeroinitializer, align 16
@vbib = common global <4 x i32> zeroinitializer, align 16
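
; xvdivdp: two-element double-precision vector divide.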
; Function Attrs: nounwind
define void @test1() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, <2 x double>* @vdr, align 16
  ret void
; CHECK-LABEL: @test1
; CHECK: xvdivdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
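
; xvdivsp: four-element single-precision vector divide.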
; Function Attrs: nounwind
define void @test2() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, <4 x float>* @vfr, align 16
  ret void
; CHECK-LABEL: @test2
; CHECK: xvdivsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
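
; @llvm.ceil on <2 x double> should select xvrdpip (round toward +infinity).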
; Function Attrs: nounwind
define void @test3() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vda, align 16
  %2 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %1)
  store <2 x double> %2, <2 x double>* @vdr, align 16
  ret void
; CHECK-LABEL: @test3
; CHECK: xvrdpip {{[0-9]+}}, {{[0-9]+}}
}
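
; @llvm.ceil on <4 x float> should select xvrspip (round toward +infinity).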
; Function Attrs: nounwind
define void @test4() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfa, align 16
  %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %1)
  store <4 x float> %2, <4 x float>* @vfr, align 16
  ret void
; CHECK-LABEL: @test4
; CHECK: xvrspip {{[0-9]+}}, {{[0-9]+}}
}
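
; xvcmpeqdp: double-precision compare equal, producing a <2 x i64> mask.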
; Function Attrs: nounwind
define void @test5() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test5
; CHECK: xvcmpeqdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
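
; xvcmpeqsp: single-precision compare equal, producing a <4 x i32> mask.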
; Function Attrs: nounwind
define void @test6() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test6
; CHECK: xvcmpeqsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
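
; xvcmpgedp: double-precision compare greater-than-or-equal.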
; Function Attrs: nounwind
define void @test7() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test7
; CHECK: xvcmpgedp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
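
; xvcmpgesp: single-precision compare greater-than-or-equal.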
; Function Attrs: nounwind
define void @test8() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test8
; CHECK: xvcmpgesp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
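
; xvcmpgtdp: double-precision compare greater-than.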
; Function Attrs: nounwind
define void @test9() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test9
; CHECK: xvcmpgtdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
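
; xvcmpgtsp: single-precision compare greater-than.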
; Function Attrs: nounwind
define void @test10() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test10
; CHECK: xvcmpgtsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ceil.v4f32(<4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float>, <4 x float>)