; RUN: llvm-as < %s | llc -march=x86 -mattr=+mmx
;; A basic sanity check to make sure that MMX arithmetic actually compiles.
; Byte-element (<8 x i8>) MMX arithmetic: generic wrapping add/sub plus the
; signed/unsigned saturating byte intrinsics, ending with emms to clear MMX state.
define void @foo(<8 x i8>* %A, <8 x i8>* %B) {
entry:
	%tmp5 = load <8 x i8>* %A		; <<8 x i8>> [#uses=1]
	%tmp7 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
	%tmp8 = add <8 x i8> %tmp5, %tmp7		; <<8 x i8>> [#uses=2]
	store <8 x i8> %tmp8, <8 x i8>* %A
	%tmp14 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
	; signed saturating add (paddsb)
	%tmp25 = tail call <8 x i8> @llvm.x86.mmx.padds.b( <8 x i8> %tmp14, <8 x i8> %tmp8 )		; <<8 x i8>> [#uses=2]
	store <8 x i8> %tmp25, <8 x i8>* %B
	%tmp36 = load <8 x i8>* %A		; <<8 x i8>> [#uses=1]
	; unsigned saturating add (paddusb)
	%tmp49 = tail call <8 x i8> @llvm.x86.mmx.paddus.b( <8 x i8> %tmp36, <8 x i8> %tmp25 )		; <<8 x i8>> [#uses=2]
	store <8 x i8> %tmp49, <8 x i8>* %B
	%tmp58 = load <8 x i8>* %A		; <<8 x i8>> [#uses=1]
	%tmp61 = sub <8 x i8> %tmp58, %tmp49		; <<8 x i8>> [#uses=2]
	store <8 x i8> %tmp61, <8 x i8>* %B
	%tmp64 = load <8 x i8>* %A		; <<8 x i8>> [#uses=1]
	; signed saturating subtract (psubsb)
	%tmp80 = tail call <8 x i8> @llvm.x86.mmx.psubs.b( <8 x i8> %tmp61, <8 x i8> %tmp64 )		; <<8 x i8>> [#uses=2]
	store <8 x i8> %tmp80, <8 x i8>* %A
	%tmp89 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
	; unsigned saturating subtract (psubusb)
	%tmp105 = tail call <8 x i8> @llvm.x86.mmx.psubus.b( <8 x i8> %tmp80, <8 x i8> %tmp89 )		; <<8 x i8>> [#uses=1]
	store <8 x i8> %tmp105, <8 x i8>* %A
	tail call void @llvm.x86.mmx.emms( )
	ret void
}
; Dword-element (<2 x i32>) MMX arithmetic: wrapping add and sub only
; (there are no saturating dword MMX intrinsics), then emms.
define void @baz(<2 x i32>* %A, <2 x i32>* %B) {
entry:
	%tmp1 = load <2 x i32>* %A		; <<2 x i32>> [#uses=1]
	%tmp3 = load <2 x i32>* %B		; <<2 x i32>> [#uses=1]
	%tmp4 = add <2 x i32> %tmp1, %tmp3		; <<2 x i32>> [#uses=2]
	store <2 x i32> %tmp4, <2 x i32>* %A
	%tmp9 = load <2 x i32>* %B		; <<2 x i32>> [#uses=1]
	%tmp10 = sub <2 x i32> %tmp4, %tmp9		; <<2 x i32>> [#uses=1]
	store <2 x i32> %tmp10, <2 x i32>* %B
	tail call void @llvm.x86.mmx.emms( )
	ret void
}
; Word-element (<4 x i16>) MMX arithmetic: mirrors @foo with the word-sized
; saturating intrinsics (padds.w/paddus.w/psubs.w/psubus.w), ending with emms.
define void @bar(<4 x i16>* %A, <4 x i16>* %B) {
entry:
	%tmp5 = load <4 x i16>* %A		; <<4 x i16>> [#uses=1]
	%tmp7 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
	%tmp8 = add <4 x i16> %tmp5, %tmp7		; <<4 x i16>> [#uses=2]
	store <4 x i16> %tmp8, <4 x i16>* %A
	%tmp14 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
	; signed saturating add (paddsw)
	%tmp25 = tail call <4 x i16> @llvm.x86.mmx.padds.w( <4 x i16> %tmp14, <4 x i16> %tmp8 )		; <<4 x i16>> [#uses=2]
	store <4 x i16> %tmp25, <4 x i16>* %B
	%tmp36 = load <4 x i16>* %A		; <<4 x i16>> [#uses=1]
	; unsigned saturating add (paddusw)
	%tmp49 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp36, <4 x i16> %tmp25 )		; <<4 x i16>> [#uses=2]
	store <4 x i16> %tmp49, <4 x i16>* %B
	%tmp58 = load <4 x i16>* %A		; <<4 x i16>> [#uses=1]
	%tmp61 = sub <4 x i16> %tmp58, %tmp49		; <<4 x i16>> [#uses=2]
	store <4 x i16> %tmp61, <4 x i16>* %B
	%tmp64 = load <4 x i16>* %A		; <<4 x i16>> [#uses=1]
	; signed saturating subtract (psubsw)
	%tmp80 = tail call <4 x i16> @llvm.x86.mmx.psubs.w( <4 x i16> %tmp61, <4 x i16> %tmp64 )		; <<4 x i16>> [#uses=2]
	store <4 x i16> %tmp80, <4 x i16>* %A
	%tmp89 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
	; unsigned saturating subtract (psubusw)
	%tmp105 = tail call <4 x i16> @llvm.x86.mmx.psubus.w( <4 x i16> %tmp80, <4 x i16> %tmp89 )		; <<4 x i16>> [#uses=1]
	store <4 x i16> %tmp105, <4 x i16>* %A
	tail call void @llvm.x86.mmx.emms( )
	ret void
}
;; Declarations of the X86 MMX saturating-arithmetic intrinsics used above,
;; plus emms (clears MMX state / empties the x87 tag word).
declare <4 x i16> @llvm.x86.mmx.padds.w(<4 x i16>, <4 x i16>)

declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)

declare <4 x i16> @llvm.x86.mmx.psubs.w(<4 x i16>, <4 x i16>)

declare <4 x i16> @llvm.x86.mmx.psubus.w(<4 x i16>, <4 x i16>)

declare <8 x i8> @llvm.x86.mmx.padds.b(<8 x i8>, <8 x i8>)

declare <8 x i8> @llvm.x86.mmx.paddus.b(<8 x i8>, <8 x i8>)

declare <8 x i8> @llvm.x86.mmx.psubs.b(<8 x i8>, <8 x i8>)

declare <8 x i8> @llvm.x86.mmx.psubus.b(<8 x i8>, <8 x i8>)

declare void @llvm.x86.mmx.emms()