; Re-work X86 code generation of atomic ops with spin-loop
; [oota-llvm.git] / test / CodeGen / X86 / atomic16.ll
; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32

@sc16 = external global i16

; atomicrmw add on i16: add-of-1 should select lock incw, a dead constant add
; selects lock addw, and an add whose result is used (%t3 feeds %t4) must
; select lock xaddw so the old value is fetched.
define void @atomic_fetch_add16() nounwind {
; X64:   atomic_fetch_add16
; X32:   atomic_fetch_add16
entry:
; 32-bit
  %t1 = atomicrmw add  i16* @sc16, i16 1 acquire
; X64:       lock
; X64:       incw
; X32:       lock
; X32:       incw
  %t2 = atomicrmw add  i16* @sc16, i16 3 acquire
; X64:       lock
; X64:       addw $3
; X32:       lock
; X32:       addw $3
  %t3 = atomicrmw add  i16* @sc16, i16 5 acquire
; X64:       lock
; X64:       xaddw
; X32:       lock
; X32:       xaddw
  %t4 = atomicrmw add  i16* @sc16, i16 %t3 acquire
; X64:       lock
; X64:       addw
; X32:       lock
; X32:       addw
  ret void
; X64:       ret
; X32:       ret
}
35
; atomicrmw sub on i16: sub-of-1 -> lock decw, dead constant sub -> lock subw,
; sub whose result is used (%t3 feeds %t4) -> lock xaddw (negated operand).
define void @atomic_fetch_sub16() nounwind {
; X64:   atomic_fetch_sub16
; X32:   atomic_fetch_sub16
  %t1 = atomicrmw sub  i16* @sc16, i16 1 acquire
; X64:       lock
; X64:       decw
; X32:       lock
; X32:       decw
  %t2 = atomicrmw sub  i16* @sc16, i16 3 acquire
; X64:       lock
; X64:       subw $3
; X32:       lock
; X32:       subw $3
  %t3 = atomicrmw sub  i16* @sc16, i16 5 acquire
; X64:       lock
; X64:       xaddw
; X32:       lock
; X32:       xaddw
  %t4 = atomicrmw sub  i16* @sc16, i16 %t3 acquire
; X64:       lock
; X64:       subw
; X32:       lock
; X32:       subw
  ret void
; X64:       ret
; X32:       ret
}
63
; atomicrmw and on i16: dead result -> direct lock andw; result used (%t2
; feeds %t3) -> cmpxchg spin-loop (andw then lock cmpxchgw).
define void @atomic_fetch_and16() nounwind {
; X64:   atomic_fetch_and16
; X32:   atomic_fetch_and16
  %t1 = atomicrmw and  i16* @sc16, i16 3 acquire
; X64:       lock
; X64:       andw $3
; X32:       lock
; X32:       andw $3
  %t2 = atomicrmw and  i16* @sc16, i16 5 acquire
; X64:       andw
; X64:       lock
; X64:       cmpxchgw
; X32:       andw
; X32:       lock
; X32:       cmpxchgw
  %t3 = atomicrmw and  i16* @sc16, i16 %t2 acquire
; X64:       lock
; X64:       andw
; X32:       lock
; X32:       andw
  ret void
; X64:       ret
; X32:       ret
}
88
; atomicrmw or on i16: dead result -> direct lock orw; result used (%t2 feeds
; %t3) -> cmpxchg spin-loop (orw then lock cmpxchgw).
define void @atomic_fetch_or16() nounwind {
; X64:   atomic_fetch_or16
; X32:   atomic_fetch_or16
  %t1 = atomicrmw or   i16* @sc16, i16 3 acquire
; X64:       lock
; X64:       orw $3
; X32:       lock
; X32:       orw $3
  %t2 = atomicrmw or   i16* @sc16, i16 5 acquire
; X64:       orw
; X64:       lock
; X64:       cmpxchgw
; X32:       orw
; X32:       lock
; X32:       cmpxchgw
  %t3 = atomicrmw or   i16* @sc16, i16 %t2 acquire
; X64:       lock
; X64:       orw
; X32:       lock
; X32:       orw
  ret void
; X64:       ret
; X32:       ret
}
113
; atomicrmw xor on i16: dead result -> direct lock xorw; result used (%t2
; feeds %t3) -> cmpxchg spin-loop (xorw then lock cmpxchgw).
define void @atomic_fetch_xor16() nounwind {
; X64:   atomic_fetch_xor16
; X32:   atomic_fetch_xor16
  %t1 = atomicrmw xor  i16* @sc16, i16 3 acquire
; X64:       lock
; X64:       xorw $3
; X32:       lock
; X32:       xorw $3
  %t2 = atomicrmw xor  i16* @sc16, i16 5 acquire
; X64:       xorw
; X64:       lock
; X64:       cmpxchgw
; X32:       xorw
; X32:       lock
; X32:       cmpxchgw
  %t3 = atomicrmw xor  i16* @sc16, i16 %t2 acquire
; X64:       lock
; X64:       xorw
; X32:       lock
; X32:       xorw
  ret void
; X64:       ret
; X32:       ret
}
138
; atomicrmw nand on i16 has no single x86 instruction: expect a cmpxchg
; spin-loop computing andw + notw then lock cmpxchgw.
define void @atomic_fetch_nand16(i16 %x) nounwind {
; X64:   atomic_fetch_nand16
; X32:   atomic_fetch_nand16
  %t1 = atomicrmw nand i16* @sc16, i16 %x acquire
; X64:       andw
; X64:       notw
; X64:       lock
; X64:       cmpxchgw
; X32:       andw
; X32:       notw
; X32:       lock
; X32:       cmpxchgw
  ret void
; X64:       ret
; X32:       ret
}
155
; atomicrmw max on i16: lowered to a cmpxchg spin-loop with cmpw + cmov to
; select the larger value.
define void @atomic_fetch_max16(i16 %x) nounwind {
  %t1 = atomicrmw max  i16* @sc16, i16 %x acquire
; X64:       cmpw
; X64:       cmov
; X64:       lock
; X64:       cmpxchgw

; X32:       cmpw
; X32:       cmov
; X32:       lock
; X32:       cmpxchgw
  ret void
; X64:       ret
; X32:       ret
}
171
; atomicrmw min on i16: lowered to a cmpxchg spin-loop with cmpw + cmov to
; select the smaller value.
define void @atomic_fetch_min16(i16 %x) nounwind {
  %t1 = atomicrmw min  i16* @sc16, i16 %x acquire
; X64:       cmpw
; X64:       cmov
; X64:       lock
; X64:       cmpxchgw

; X32:       cmpw
; X32:       cmov
; X32:       lock
; X32:       cmpxchgw
  ret void
; X64:       ret
; X32:       ret
}
187
; atomicrmw umax on i16: unsigned maximum via cmpxchg spin-loop
; (cmpw + cmov + lock cmpxchgw).
define void @atomic_fetch_umax16(i16 %x) nounwind {
  %t1 = atomicrmw umax i16* @sc16, i16 %x acquire
; X64:       cmpw
; X64:       cmov
; X64:       lock
; X64:       cmpxchgw

; X32:       cmpw
; X32:       cmov
; X32:       lock
; X32:       cmpxchgw
  ret void
; X64:       ret
; X32:       ret
}
203
; atomicrmw umin on i16: unsigned minimum via cmpxchg spin-loop
; (cmpw + cmov + lock cmpxchgw).
define void @atomic_fetch_umin16(i16 %x) nounwind {
  %t1 = atomicrmw umin i16* @sc16, i16 %x acquire
; X64:       cmpw
; X64:       cmov
; X64:       lock
; X64:       cmpxchgw
; X32:       cmpw
; X32:       cmov
; X32:       lock
; X32:       cmpxchgw
  ret void
; X64:       ret
; X32:       ret
}
218
; i16 compare-and-swap maps directly to lock cmpxchgw.
; NOTE(review): single-ordering cmpxchg is the pre-LLVM-3.5 syntax this test
; targets — do not add a failure ordering without updating the whole file.
define void @atomic_fetch_cmpxchg16() nounwind {
  %t1 = cmpxchg i16* @sc16, i16 0, i16 1 acquire
; X64:       lock
; X64:       cmpxchgw
; X32:       lock
; X32:       cmpxchgw
  ret void
; X64:       ret
; X32:       ret
}
229
; A release atomic store on x86 needs only a plain movw — no lock prefix.
define void @atomic_fetch_store16(i16 %x) nounwind {
  store atomic i16 %x, i16* @sc16 release, align 4
; X64-NOT:   lock
; X64:       movw
; X32-NOT:   lock
; X32:       movw
  ret void
; X64:       ret
; X32:       ret
}
240
; atomicrmw xchg uses xchgw, which is implicitly locked — no explicit lock
; prefix should be emitted.
define void @atomic_fetch_swap16(i16 %x) nounwind {
  %t1 = atomicrmw xchg i16* @sc16, i16 %x acquire
; X64-NOT:   lock
; X64:       xchgw
; X32-NOT:   lock
; X32:       xchgw
  ret void
; X64:       ret
; X32:       ret
}