//=- AArch64InstrAtomics.td - AArch64 Atomic codegen support -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// AArch64 Atomic operand code-gen constructs.
//
//===----------------------------------------------------------------------===//

//===----------------------------------
// Atomic fences
//===----------------------------------
def : Pat<(atomic_fence (i64 4), (imm)), (DMB (i32 0x9))>;
def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>;

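// For illustration (assuming the usual IR-level lowering): the first operand
// of atomic_fence is the AtomicOrdering value, and 4 is an acquire fence in
// this revision's ordering enum, so e.g. "fence acquire" should select
// DMB ISHLD (CRm encoding 0x9), while any other fence ordering, such as
// "fence seq_cst", falls through to DMB ISH (0xb).
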
//===----------------------------------
// Atomic loads
//===----------------------------------

// When they're actually atomic, only one addressing mode (GPR64sp) is
// supported, but when they're relaxed and anything can be used, all the
// standard modes would be valid and may give efficiency gains.

// An atomic load operation that actually needs acquire semantics.
class acquiring_load<PatFrag base>
  : PatFrag<(ops node:$ptr), (base node:$ptr), [{
  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
  return isAtLeastAcquire(Ordering);
}]>;

// An atomic load operation that does not need either acquire or release
// semantics.
class relaxed_load<PatFrag base>
  : PatFrag<(ops node:$ptr), (base node:$ptr), [{
  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
  return !isAtLeastAcquire(Ordering);
}]>;

// 8-bit loads
def : Pat<(acquiring_load<atomic_load_8>  GPR64sp:$ptr), (LDARB GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_8> (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm,
                                                     ro_Wextend8:$offset)),
          (LDRBBroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_8> (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm,
                                                     ro_Xextend8:$offset)),
          (LDRBBroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_8> (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_load<atomic_load_8>
               (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;

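// Illustrative IR (not from this file), assuming the usual lowering:
//   %v = load atomic i8, i8* %p acquire, align 1     ; must select LDARB
//   %v = load atomic i8, i8* %p monotonic, align 1   ; may select any relaxed
//                                                    ; form above, e.g. LDRBBui
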
// 16-bit loads
def : Pat<(acquiring_load<atomic_load_16> GPR64sp:$ptr), (LDARH GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend16:$extend)),
          (LDRHHroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend16:$extend)),
          (LDRHHroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_16> (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_load<atomic_load_16>
               (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bit loads
def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDARW GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend32:$extend)),
          (LDRWroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend32:$extend)),
          (LDRWroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)),
          (LDRWui GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_load<atomic_load_32>
               (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
          (LDURWi GPR64sp:$Rn, simm9:$offset)>;

// 64-bit loads
def : Pat<(acquiring_load<atomic_load_64> GPR64sp:$ptr), (LDARX GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend64:$extend)),
          (LDRXroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend64:$extend)),
          (LDRXroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset)),
          (LDRXui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_load<atomic_load_64>
               (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (LDURXi GPR64sp:$Rn, simm9:$offset)>;

//===----------------------------------
// Atomic stores
//===----------------------------------

// When they're actually atomic, only one addressing mode (GPR64sp) is
// supported, but when they're relaxed and anything can be used, all the
// standard modes would be valid and may give efficiency gains.

// A store operation that actually needs release semantics.
class releasing_store<PatFrag base>
  : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
  assert(Ordering != AcquireRelease && "unexpected store ordering");
  return isAtLeastRelease(Ordering);
}]>;

// An atomic store operation that doesn't actually need to be atomic on AArch64.
class relaxed_store<PatFrag base>
  : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
  AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
  return !isAtLeastRelease(Ordering);
}]>;

// 8-bit stores
def : Pat<(releasing_store<atomic_store_8> GPR64sp:$ptr, GPR32:$val),
          (STLRB GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_8>
               (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend),
               GPR32:$val),
          (STRBBroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
               (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend),
               GPR32:$val),
          (STRBBroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
               (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset), GPR32:$val),
          (STRBBui GPR32:$val, GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_store<atomic_store_8>
               (am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURBBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

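// Illustrative IR (not from this file), assuming the usual lowering:
//   store atomic i8 %v, i8* %p release, align 1     ; must select STLRB
//   store atomic i8 %v, i8* %p monotonic, align 1   ; may select any relaxed
//                                                   ; form above, e.g. STRBBui
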
// 16-bit stores
def : Pat<(releasing_store<atomic_store_16> GPR64sp:$ptr, GPR32:$val),
          (STLRH GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend16:$extend),
                                          GPR32:$val),
          (STRHHroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend16:$extend),
                                          GPR32:$val),
          (STRHHroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16>
              (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset), GPR32:$val),
          (STRHHui GPR32:$val, GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_store<atomic_store_16>
               (am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURHHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

// 32-bit stores
def : Pat<(releasing_store<atomic_store_32> GPR64sp:$ptr, GPR32:$val),
          (STLRW GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend32:$extend),
                                          GPR32:$val),
          (STRWroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend32:$extend),
                                          GPR32:$val),
          (STRWroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32>
              (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset), GPR32:$val),
          (STRWui GPR32:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_store<atomic_store_32>
               (am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

// 64-bit stores
def : Pat<(releasing_store<atomic_store_64> GPR64sp:$ptr, GPR64:$val),
          (STLRX GPR64:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend64:$extend),
                                          GPR64:$val),
          (STRXroW GPR64:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend64:$extend),
                                          GPR64:$val),
          (STRXroX GPR64:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64>
              (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset), GPR64:$val),
          (STRXui GPR64:$val, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_store<atomic_store_64>
               (am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
          (STURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;

//===----------------------------------
// Low-level exclusive operations
//===----------------------------------

// Load-exclusives.

def ldxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def ldxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def ldxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def ldxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def : Pat<(ldxr_1 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_2 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_4 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_8 GPR64sp:$addr), (LDXRX GPR64sp:$addr)>;

def : Pat<(and (ldxr_1 GPR64sp:$addr), 0xff),
          (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_2 GPR64sp:$addr), 0xffff),
          (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_4 GPR64sp:$addr), 0xffffffff),
          (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;

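// Illustrative IR (not from this file; the pointer-type suffix may differ):
//   %v = call i64 @llvm.aarch64.ldxr.p0i8(i8* %p)
// should select LDXRB, with the 32-bit result placed into a 64-bit register
// via SUBREG_TO_REG. The 'and' patterns above fold an explicit mask, which is
// redundant because the narrow exclusive load already zero-extends.
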
// Load-acquire-exclusives.

def ldaxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def ldaxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def ldaxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def ldaxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def : Pat<(ldaxr_1 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_2 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_4 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_8 GPR64sp:$addr), (LDAXRX GPR64sp:$addr)>;

def : Pat<(and (ldaxr_1 GPR64sp:$addr), 0xff),
          (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_2 GPR64sp:$addr), 0xffff),
          (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_4 GPR64sp:$addr), 0xffffffff),
          (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;

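// These mirror the plain load-exclusive patterns above (via int_aarch64_ldaxr);
// the LDAXR* instructions additionally provide acquire semantics.
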
// Store-exclusives.

def stxr_1 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def stxr_2 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def stxr_4 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def stxr_8 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;


def : Pat<(stxr_1 GPR64:$val, GPR64sp:$addr),
          (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 GPR64:$val, GPR64sp:$addr),
          (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 GPR64:$val, GPR64sp:$addr),
          (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_8 GPR64:$val, GPR64sp:$addr),
          (STXRX GPR64:$val, GPR64sp:$addr)>;

def : Pat<(stxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
          (STXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
          (STXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_4 (zext GPR32:$val), GPR64sp:$addr),
          (STXRW GPR32:$val, GPR64sp:$addr)>;

def : Pat<(stxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
          (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
          (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
          (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;

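// Illustrative IR (not from this file; the pointer-type suffix may differ):
//   %status = call i32 @llvm.aarch64.stxr.p0i8(i64 %v, i8* %p)
// should select STXRB of the low 32 bits of %v; the zext/and forms above let a
// 32-bit value be stored without an explicit extension or mask.
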
// Store-release-exclusives.

def stlxr_1 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def stlxr_2 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def stlxr_4 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def stlxr_8 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;


def : Pat<(stlxr_1 GPR64:$val, GPR64sp:$addr),
          (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 GPR64:$val, GPR64sp:$addr),
          (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 GPR64:$val, GPR64sp:$addr),
          (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_8 GPR64:$val, GPR64sp:$addr),
          (STLXRX GPR64:$val, GPR64sp:$addr)>;

def : Pat<(stlxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
          (STLXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
          (STLXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_4 (zext GPR32:$val), GPR64sp:$addr),
          (STLXRW GPR32:$val, GPR64sp:$addr)>;

def : Pat<(stlxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
          (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
          (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
          (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;

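// These mirror the plain store-exclusive patterns above (via int_aarch64_stlxr);
// the STLXR* instructions additionally provide release semantics.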

// And clear exclusive.

def : Pat<(int_aarch64_clrex), (CLREX 0xf)>;
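
// Illustrative IR (not from this file):
//   call void @llvm.aarch64.clrex()
// should select CLREX #15 (0xf), clearing the local exclusive monitor.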