; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=+power8-vector -mattr=-vsx < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s -check-prefix=CHECK-VSX
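
; Verify that the llvm.ppc.altivec.vbpermq and llvm.ppc.altivec.vgbbd
; intrinsics select to the POWER8 vbpermq and vgbbd instructions. The first
; three RUN lines disable VSX and additionally check the lvx loads that feed
; the instruction; the CHECK-VSX run leaves VSX enabled (so the loads may
; differ) and only checks the instruction itself.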
@vsc = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>, align 16
@vuc = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5>, align 16
@res_vll = common global <2 x i64> zeroinitializer, align 16
@res_vull = common global <2 x i64> zeroinitializer, align 16
@res_vsc = common global <16 x i8> zeroinitializer, align 16
@res_vuc = common global <16 x i8> zeroinitializer, align 16
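
; vbpermq with both operands loaded from the signed char vector @vsc; the
; <2 x i64> result is stored to @res_vll. The alloca/store/reload traffic
; mirrors unoptimized frontend output and is expected to fold away, leaving
; just the two lvx loads checked below.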
; Function Attrs: nounwind
define void @test1() {
entry:
  %__a.addr.i = alloca <16 x i8>, align 16
  %__b.addr.i = alloca <16 x i8>, align 16
  %0 = load <16 x i8>, <16 x i8>* @vsc, align 16
  %1 = load <16 x i8>, <16 x i8>* @vsc, align 16
  store <16 x i8> %0, <16 x i8>* %__a.addr.i, align 16
  store <16 x i8> %1, <16 x i8>* %__b.addr.i, align 16
  %2 = load <16 x i8>, <16 x i8>* %__a.addr.i, align 16
  %3 = load <16 x i8>, <16 x i8>* %__b.addr.i, align 16
  %4 = call <2 x i64> @llvm.ppc.altivec.vbpermq(<16 x i8> %2, <16 x i8> %3)
  store <2 x i64> %4, <2 x i64>* @res_vll, align 16
  ret void
; CHECK: lvx [[REG1:[0-9]+]],
; CHECK: lvx [[REG2:[0-9]+]],
; CHECK: vbpermq {{[0-9]+}}, [[REG2]], [[REG1]]
; CHECK-VSX: vbpermq {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
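
; Same as test1, but reading the unsigned char vector @vuc and storing to
; @res_vull; vbpermq operates on raw bits, so the expected codegen is
; identical.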
; Function Attrs: nounwind
define void @test2() {
entry:
  %__a.addr.i = alloca <16 x i8>, align 16
  %__b.addr.i = alloca <16 x i8>, align 16
  %0 = load <16 x i8>, <16 x i8>* @vuc, align 16
  %1 = load <16 x i8>, <16 x i8>* @vuc, align 16
  store <16 x i8> %0, <16 x i8>* %__a.addr.i, align 16
  store <16 x i8> %1, <16 x i8>* %__b.addr.i, align 16
  %2 = load <16 x i8>, <16 x i8>* %__a.addr.i, align 16
  %3 = load <16 x i8>, <16 x i8>* %__b.addr.i, align 16
  %4 = call <2 x i64> @llvm.ppc.altivec.vbpermq(<16 x i8> %2, <16 x i8> %3)
  store <2 x i64> %4, <2 x i64>* @res_vull, align 16
  ret void
; CHECK: lvx [[REG1:[0-9]+]],
; CHECK: lvx [[REG2:[0-9]+]],
; CHECK: vbpermq {{[0-9]+}}, [[REG2]], [[REG1]]
; CHECK-VSX: vbpermq {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
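
; vgbbd (Vector Gather Bits by Bytes by Doubleword) on the signed char
; vector @vsc; the <16 x i8> result is stored to @res_vsc.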
; Function Attrs: nounwind
define void @test3() {
entry:
  %__a.addr.i = alloca <16 x i8>, align 16
  %0 = load <16 x i8>, <16 x i8>* @vsc, align 16
  store <16 x i8> %0, <16 x i8>* %__a.addr.i, align 16
  %1 = load <16 x i8>, <16 x i8>* %__a.addr.i, align 16
  %2 = call <16 x i8> @llvm.ppc.altivec.vgbbd(<16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @res_vsc, align 16
  ret void
; CHECK: lvx [[REG1:[0-9]+]],
; CHECK: vgbbd {{[0-9]+}}, [[REG1]]
; CHECK-VSX: vgbbd {{[0-9]+}}, {{[0-9]+}}
}
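
; Same as test3, but with the unsigned char vector @vuc, storing to @res_vuc.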
; Function Attrs: nounwind
define void @test4() {
entry:
  %__a.addr.i = alloca <16 x i8>, align 16
  %0 = load <16 x i8>, <16 x i8>* @vuc, align 16
  store <16 x i8> %0, <16 x i8>* %__a.addr.i, align 16
  %1 = load <16 x i8>, <16 x i8>* %__a.addr.i, align 16
  %2 = call <16 x i8> @llvm.ppc.altivec.vgbbd(<16 x i8> %1)
  store <16 x i8> %2, <16 x i8>* @res_vuc, align 16
  ret void
; CHECK: lvx [[REG1:[0-9]+]],
; CHECK: vgbbd {{[0-9]+}}, [[REG1]]
; CHECK-VSX: vgbbd {{[0-9]+}}, {{[0-9]+}}
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.vbpermq(<16 x i8>, <16 x i8>)

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.vgbbd(<16 x i8>)