; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64

; XFAIL: cygwin,mingw32

@sc64 = external global i64

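; This test checks the -O0 lowering of 64-bit atomic operations. The X64
; prefix covers the x86-64 code checked by the RUN line above; the X32 check
; lines describe the expected 32-bit (cmpxchg8b-based) lowering and are only
; exercised when FileCheck is run with -check-prefix X32.

; add/sub: a dead result with an operand of 1 is expected to fold to
; lock incq/decq, other immediates to lock addq/subq $imm, and a value that
; is used later (%t3 feeding %t4) to lock xaddq.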
define void @atomic_fetch_add64() nounwind {
; X64:   atomic_fetch_add64
entry:
  %t1 = atomicrmw add  i64* @sc64, i64 1 acquire
; X64:       lock
; X64:       incq
  %t2 = atomicrmw add  i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       addq $3
  %t3 = atomicrmw add  i64* @sc64, i64 5 acquire
; X64:       lock
; X64:       xaddq
  %t4 = atomicrmw add  i64* @sc64, i64 %t3 acquire
; X64:       lock
; X64:       addq
  ret void
; X64:       ret
}

define void @atomic_fetch_sub64() nounwind {
; X64:   atomic_fetch_sub64
  %t1 = atomicrmw sub  i64* @sc64, i64 1 acquire
; X64:       lock
; X64:       decq
  %t2 = atomicrmw sub  i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       subq $3
  %t3 = atomicrmw sub  i64* @sc64, i64 5 acquire
; X64:       lock
; X64:       xaddq
  %t4 = atomicrmw sub  i64* @sc64, i64 %t3 acquire
; X64:       lock
; X64:       subq
  ret void
; X64:       ret
}

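; and/or/xor: an immediate operand with a dead result is expected to lower to
; a single lock-prefixed RMW instruction; when the old value is needed, the
; operation is done in registers and committed with a lock cmpxchgq loop.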
define void @atomic_fetch_and64() nounwind {
; X64:   atomic_fetch_and64
  %t1 = atomicrmw and  i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       andq $3
  %t2 = atomicrmw and  i64* @sc64, i64 5 acquire
; X64:       andq
; X64:       lock
; X64:       cmpxchgq
  %t3 = atomicrmw and  i64* @sc64, i64 %t2 acquire
; X64:       lock
; X64:       andq
  ret void
; X64:       ret
}

define void @atomic_fetch_or64() nounwind {
; X64:   atomic_fetch_or64
  %t1 = atomicrmw or   i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       orq $3
  %t2 = atomicrmw or   i64* @sc64, i64 5 acquire
; X64:       orq
; X64:       lock
; X64:       cmpxchgq
  %t3 = atomicrmw or   i64* @sc64, i64 %t2 acquire
; X64:       lock
; X64:       orq
  ret void
; X64:       ret
}

define void @atomic_fetch_xor64() nounwind {
; X64:   atomic_fetch_xor64
  %t1 = atomicrmw xor  i64* @sc64, i64 3 acquire
; X64:       lock
; X64:       xorq $3
  %t2 = atomicrmw xor  i64* @sc64, i64 5 acquire
; X64:       xorq
; X64:       lock
; X64:       cmpxchgq
  %t3 = atomicrmw xor  i64* @sc64, i64 %t2 acquire
; X64:       lock
; X64:       xorq
  ret void
; X64:       ret
}

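; nand has no direct x86 form: the expected lowering computes and+not in
; registers and commits the result with a lock cmpxchg loop (cmpxchg8b on
; 32-bit, where each half of the value is handled separately).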
define void @atomic_fetch_nand64(i64 %x) nounwind {
; X64:   atomic_fetch_nand64
; X32:   atomic_fetch_nand64
  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X64:       andq
; X64:       notq
; X64:       lock
; X64:       cmpxchgq
; X32:       andl
; X32:       andl
; X32:       notl
; X32:       notl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

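; min/max (signed and unsigned) have no single x86 instruction; the expected
; lowering is a compare plus cmov feeding a lock cmpxchgq loop, and on 32-bit
; a two-part compare with cmovs feeding a lock cmpxchg8b loop.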
define void @atomic_fetch_max64(i64 %x) nounwind {
  %t1 = atomicrmw max  i64* @sc64, i64 %x acquire
; X64:       cmpq
; X64:       cmov
; X64:       lock
; X64:       cmpxchgq

; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_min64(i64 %x) nounwind {
  %t1 = atomicrmw min  i64* @sc64, i64 %x acquire
; X64:       cmpq
; X64:       cmov
; X64:       lock
; X64:       cmpxchgq

; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_umax64(i64 %x) nounwind {
  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
; X64:       cmpq
; X64:       cmov
; X64:       lock
; X64:       cmpxchgq

; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_umin64(i64 %x) nounwind {
  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
; X64:       cmpq
; X64:       cmov
; X64:       lock
; X64:       cmpxchgq

; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

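; A plain cmpxchg maps directly onto lock cmpxchgq (lock cmpxchg8b on 32-bit).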
define void @atomic_fetch_cmpxchg64() nounwind {
  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
; X64:       lock
; X64:       cmpxchgq
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

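; On x86-64 a release store needs no lock prefix; a plain movq suffices. The
; 32-bit lowering of a 64-bit atomic store uses a lock cmpxchg8b loop.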
define void @atomic_fetch_store64(i64 %x) nounwind {
  store atomic i64 %x, i64* @sc64 release, align 8
; X64-NOT:   lock
; X64:       movq
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}

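; xchg with a memory operand is implicitly locked, so no explicit lock prefix
; is expected on x86-64; the 32-bit lowering again uses a lock cmpxchg8b loop.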
define void @atomic_fetch_swap64(i64 %x) nounwind {
  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X64-NOT:   lock
; X64:       xchgq
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X64:       ret
; X32:       ret
}