1 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | grep eqv | wc -l | grep 3 &&
2 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 | grep andc | wc -l | grep 3 &&
3 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | grep orc | wc -l | grep 2 &&
4 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 | grep nor | wc -l | grep 3 &&
5 ; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | grep nand | wc -l | grep 1
7 int %EQV1(int %X, int %Y) {
13 int %EQV2(int %X, int %Y) {
19 int %EQV3(int %X, int %Y) {
25 int %ANDC1(int %X, int %Y) {
31 int %ANDC2(int %X, int %Y) {
37 int %ORC1(int %X, int %Y) {
43 int %ORC2(int %X, int %Y) {
54 int %NOR2(int %X, int %Y) {
60 int %NAND1(int %X, int %Y) {
; VNOR: computes the bitwise NOR of two <4 x float> vectors, treating their
; bits as <4 x int>.  Loads *%P and *%Q, bitcasts both to integer vectors,
; ORs them, complements the result via xor with all-ones, casts back to
; float, and stores into *%P.  This or+not pattern is expected to select a
; single vector "nor" instruction (counted by the "grep nor" RUN line above).
; NOTE(review): old pre-2.0 IR syntax -- "%tmp" is intentionally redefined;
; llvm-upgrade renames these.  The function's "ret void" and closing brace
; fall outside this visible chunk -- body appears truncated here.
66 void %VNOR(<4 x float>* %P, <4 x float>* %Q) {
67 %tmp = load <4 x float>* %P
68 %tmp = cast <4 x float> %tmp to <4 x int>
69 %tmp2 = load <4 x float>* %Q
70 %tmp2 = cast <4 x float> %tmp2 to <4 x int>
71 %tmp3 = or <4 x int> %tmp, %tmp2
72 %tmp4 = xor <4 x int> %tmp3, < int -1, int -1, int -1, int -1 >
73 %tmp4 = cast <4 x int> %tmp4 to <4 x float>
74 store <4 x float> %tmp4, <4 x float>* %P
; VANDC: computes *%P AND (NOT *%Q) on the integer bit patterns of two
; <4 x float> vectors and stores the result back into *%P.  The
; complement-then-and shape (xor with all-ones feeding an and) is expected
; to select a single vector "andc" instruction (counted by the "grep andc"
; RUN line above).
; NOTE(review): old pre-2.0 IR syntax -- "%tmp"/"%tmp2"/"%tmp4" are
; intentionally redefined; llvm-upgrade renames these.  The function's
; "ret void" and closing brace fall outside this visible chunk -- body
; appears truncated here.
78 void %VANDC(<4 x float>* %P, <4 x float>* %Q) {
79 %tmp = load <4 x float>* %P
80 %tmp = cast <4 x float> %tmp to <4 x int>
81 %tmp2 = load <4 x float>* %Q
82 %tmp2 = cast <4 x float> %tmp2 to <4 x int>
83 %tmp4 = xor <4 x int> %tmp2, < int -1, int -1, int -1, int -1 >
84 %tmp3 = and <4 x int> %tmp, %tmp4
85 %tmp4 = cast <4 x int> %tmp3 to <4 x float>
86 store <4 x float> %tmp4, <4 x float>* %P