1 //===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file provides AArch64 specific target descriptions.
12 //===----------------------------------------------------------------------===//
14 #include "AArch64MCTargetDesc.h"
15 #include "AArch64BaseInfo.h"
16 #include "AArch64ELFStreamer.h"
17 #include "AArch64MCAsmInfo.h"
18 #include "InstPrinter/AArch64InstPrinter.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/APInt.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/MC/MCCodeGenInfo.h"
23 #include "llvm/MC/MCInstrAnalysis.h"
24 #include "llvm/MC/MCInstrInfo.h"
25 #include "llvm/MC/MCRegisterInfo.h"
26 #include "llvm/MC/MCStreamer.h"
27 #include "llvm/MC/MCSubtargetInfo.h"
28 #include "llvm/Support/Regex.h"
29 #include "llvm/Support/TargetRegistry.h"
30 #include "llvm/Support/ErrorHandling.h"
32 #define GET_REGINFO_MC_DESC
33 #include "AArch64GenRegisterInfo.inc"
35 #define GET_INSTRINFO_MC_DESC
36 #include "AArch64GenInstrInfo.inc"
38 #define GET_SUBTARGETINFO_MC_DESC
39 #include "AArch64GenSubtargetInfo.inc"
// Map an immediate encoding back to its symbolic name by linear search of
// the Pairs table. Valid reports whether a matching entry was found.
StringRef NamedImmMapper::toString(uint32_t Value, bool &Valid) const {
  for (unsigned i = 0; i < NumPairs; ++i) {
    if (Pairs[i].Value == Value) {
// Map a symbolic name to its immediate encoding. The lookup is
// case-insensitive: Name is lowered before comparison, so the Pairs table
// is expected to hold lower-case names. Valid reports whether Name matched.
uint32_t NamedImmMapper::fromString(StringRef Name, bool &Valid) const {
  std::string LowerCaseName = Name.lower();
  for (unsigned i = 0; i < NumPairs; ++i) {
    if (Pairs[i].Name == LowerCaseName) {
      return Pairs[i].Value;
// A raw (unnamed) immediate is permitted iff it is strictly below TooBigImm.
// A mapper constructed with TooBigImm == 0 therefore accepts named values only.
bool NamedImmMapper::validImm(uint32_t Value) const {
  return Value < TooBigImm;
// Address-translation (AT) operation names -> encodings.
const NamedImmMapper::Mapping A64AT::ATMapper::ATPairs[] = {
87 A64AT::ATMapper::ATMapper()
88 : NamedImmMapper(ATPairs, 0) {}
// Data-barrier (DMB/DSB) option names -> encodings.
const NamedImmMapper::Mapping A64DB::DBarrierMapper::DBarrierPairs[] = {
105 A64DB::DBarrierMapper::DBarrierMapper()
106 : NamedImmMapper(DBarrierPairs, 16u) {}
// Data-cache maintenance (DC) operation names -> encodings.
const NamedImmMapper::Mapping A64DC::DCMapper::DCPairs[] = {
119 A64DC::DCMapper::DCMapper()
120 : NamedImmMapper(DCPairs, 0) {}
// Instruction-cache maintenance (IC) operation names -> encodings.
const NamedImmMapper::Mapping A64IC::ICMapper::ICPairs[] = {
  {"ialluis", IALLUIS},
128 A64IC::ICMapper::ICMapper()
129 : NamedImmMapper(ICPairs, 0) {}
// Instruction-synchronization-barrier (ISB) option names -> encodings.
const NamedImmMapper::Mapping A64ISB::ISBMapper::ISBPairs[] = {
135 A64ISB::ISBMapper::ISBMapper()
136 : NamedImmMapper(ISBPairs, 16) {}
// Prefetch (PRFM) operation names -> encodings. Names follow the pattern
// p{ld,st}l{1,2,3}{keep,strm}: load/store, target cache level, policy.
const NamedImmMapper::Mapping A64PRFM::PRFMMapper::PRFMPairs[] = {
  {"pldl1keep", PLDL1KEEP},
  {"pldl1strm", PLDL1STRM},
  {"pldl2keep", PLDL2KEEP},
  {"pldl2strm", PLDL2STRM},
  {"pldl3keep", PLDL3KEEP},
  {"pldl3strm", PLDL3STRM},
  {"pstl1keep", PSTL1KEEP},
  {"pstl1strm", PSTL1STRM},
  {"pstl2keep", PSTL2KEEP},
  {"pstl2strm", PSTL2STRM},
  {"pstl3keep", PSTL3KEEP},
  {"pstl3strm", PSTL3STRM}
153 A64PRFM::PRFMMapper::PRFMMapper()
154 : NamedImmMapper(PRFMPairs, 32) {}
// MSR (immediate) PSTATE field names -> encodings.
const NamedImmMapper::Mapping A64PState::PStateMapper::PStatePairs[] = {
  {"daifset", DAIFSet},
162 A64PState::PStateMapper::PStateMapper()
163 : NamedImmMapper(PStatePairs, 0) {}
// System registers accessible via MRS only (read-only registers: ID
// registers, cache-type/level registers, counters and status registers).
// Registers accessible by both MRS and MSR live in SysRegPairs instead.
const NamedImmMapper::Mapping A64SysReg::MRSMapper::MRSPairs[] = {
  {"mdccsr_el0", MDCCSR_EL0},
  {"dbgdtrrx_el0", DBGDTRRX_EL0},
  {"mdrar_el1", MDRAR_EL1},
  {"oslsr_el1", OSLSR_EL1},
  {"dbgauthstatus_el1", DBGAUTHSTATUS_EL1},
  {"pmceid0_el0", PMCEID0_EL0},
  {"pmceid1_el0", PMCEID1_EL0},
  {"midr_el1", MIDR_EL1},
  {"ccsidr_el1", CCSIDR_EL1},
  {"clidr_el1", CLIDR_EL1},
  {"ctr_el0", CTR_EL0},
  {"mpidr_el1", MPIDR_EL1},
  {"revidr_el1", REVIDR_EL1},
  {"aidr_el1", AIDR_EL1},
  {"dczid_el0", DCZID_EL0},
  {"id_pfr0_el1", ID_PFR0_EL1},
  {"id_pfr1_el1", ID_PFR1_EL1},
  {"id_dfr0_el1", ID_DFR0_EL1},
  {"id_afr0_el1", ID_AFR0_EL1},
  {"id_mmfr0_el1", ID_MMFR0_EL1},
  {"id_mmfr1_el1", ID_MMFR1_EL1},
  {"id_mmfr2_el1", ID_MMFR2_EL1},
  {"id_mmfr3_el1", ID_MMFR3_EL1},
  {"id_isar0_el1", ID_ISAR0_EL1},
  {"id_isar1_el1", ID_ISAR1_EL1},
  {"id_isar2_el1", ID_ISAR2_EL1},
  {"id_isar3_el1", ID_ISAR3_EL1},
  {"id_isar4_el1", ID_ISAR4_EL1},
  {"id_isar5_el1", ID_ISAR5_EL1},
  {"id_aa64pfr0_el1", ID_AA64PFR0_EL1},
  {"id_aa64pfr1_el1", ID_AA64PFR1_EL1},
  {"id_aa64dfr0_el1", ID_AA64DFR0_EL1},
  {"id_aa64dfr1_el1", ID_AA64DFR1_EL1},
  {"id_aa64afr0_el1", ID_AA64AFR0_EL1},
  {"id_aa64afr1_el1", ID_AA64AFR1_EL1},
  {"id_aa64isar0_el1", ID_AA64ISAR0_EL1},
  {"id_aa64isar1_el1", ID_AA64ISAR1_EL1},
  {"id_aa64mmfr0_el1", ID_AA64MMFR0_EL1},
  {"id_aa64mmfr1_el1", ID_AA64MMFR1_EL1},
  {"mvfr0_el1", MVFR0_EL1},
  {"mvfr1_el1", MVFR1_EL1},
  {"mvfr2_el1", MVFR2_EL1},
  {"rvbar_el1", RVBAR_EL1},
  {"rvbar_el2", RVBAR_EL2},
  {"rvbar_el3", RVBAR_EL3},
  {"isr_el1", ISR_EL1},
  {"cntpct_el0", CNTPCT_EL0},
  {"cntvct_el0", CNTVCT_EL0}
// Point the mapper's instruction-specific table at the MRS-only registers
// above; SysRegMapper::fromString/toString consult SysRegPairs first and
// then InstPairs.
A64SysReg::MRSMapper::MRSMapper() {
  InstPairs = &MRSPairs[0];
  NumInstPairs = llvm::array_lengthof(MRSPairs);
// System registers accessible via MSR only (write-only registers).
const NamedImmMapper::Mapping A64SysReg::MSRMapper::MSRPairs[] = {
  {"dbgdtrtx_el0", DBGDTRTX_EL0},
  {"oslar_el1", OSLAR_EL1},
  {"pmswinc_el0", PMSWINC_EL0}
// Point the mapper's instruction-specific table at the MSR-only registers.
A64SysReg::MSRMapper::MSRMapper() {
  InstPairs = &MSRPairs[0];
  NumInstPairs = llvm::array_lengthof(MSRPairs);
// System registers accessible by both MRS and MSR, shared by the two
// instruction-specific mappers. Searched before InstPairs in
// SysRegMapper::fromString/toString.
const NamedImmMapper::Mapping A64SysReg::SysRegMapper::SysRegPairs[] = {
  {"osdtrrx_el1", OSDTRRX_EL1},
  {"osdtrtx_el1", OSDTRTX_EL1},
  {"teecr32_el1", TEECR32_EL1},
  {"mdccint_el1", MDCCINT_EL1},
  {"mdscr_el1", MDSCR_EL1},
  {"dbgdtr_el0", DBGDTR_EL0},
  {"oseccr_el1", OSECCR_EL1},
  {"dbgvcr32_el2", DBGVCR32_EL2},
  // Hardware breakpoint value/control registers 0-15.
  {"dbgbvr0_el1", DBGBVR0_EL1},
  {"dbgbvr1_el1", DBGBVR1_EL1},
  {"dbgbvr2_el1", DBGBVR2_EL1},
  {"dbgbvr3_el1", DBGBVR3_EL1},
  {"dbgbvr4_el1", DBGBVR4_EL1},
  {"dbgbvr5_el1", DBGBVR5_EL1},
  {"dbgbvr6_el1", DBGBVR6_EL1},
  {"dbgbvr7_el1", DBGBVR7_EL1},
  {"dbgbvr8_el1", DBGBVR8_EL1},
  {"dbgbvr9_el1", DBGBVR9_EL1},
  {"dbgbvr10_el1", DBGBVR10_EL1},
  {"dbgbvr11_el1", DBGBVR11_EL1},
  {"dbgbvr12_el1", DBGBVR12_EL1},
  {"dbgbvr13_el1", DBGBVR13_EL1},
  {"dbgbvr14_el1", DBGBVR14_EL1},
  {"dbgbvr15_el1", DBGBVR15_EL1},
  {"dbgbcr0_el1", DBGBCR0_EL1},
  {"dbgbcr1_el1", DBGBCR1_EL1},
  {"dbgbcr2_el1", DBGBCR2_EL1},
  {"dbgbcr3_el1", DBGBCR3_EL1},
  {"dbgbcr4_el1", DBGBCR4_EL1},
  {"dbgbcr5_el1", DBGBCR5_EL1},
  {"dbgbcr6_el1", DBGBCR6_EL1},
  {"dbgbcr7_el1", DBGBCR7_EL1},
  {"dbgbcr8_el1", DBGBCR8_EL1},
  {"dbgbcr9_el1", DBGBCR9_EL1},
  {"dbgbcr10_el1", DBGBCR10_EL1},
  {"dbgbcr11_el1", DBGBCR11_EL1},
  {"dbgbcr12_el1", DBGBCR12_EL1},
  {"dbgbcr13_el1", DBGBCR13_EL1},
  {"dbgbcr14_el1", DBGBCR14_EL1},
  {"dbgbcr15_el1", DBGBCR15_EL1},
  // Hardware watchpoint value/control registers 0-15.
  {"dbgwvr0_el1", DBGWVR0_EL1},
  {"dbgwvr1_el1", DBGWVR1_EL1},
  {"dbgwvr2_el1", DBGWVR2_EL1},
  {"dbgwvr3_el1", DBGWVR3_EL1},
  {"dbgwvr4_el1", DBGWVR4_EL1},
  {"dbgwvr5_el1", DBGWVR5_EL1},
  {"dbgwvr6_el1", DBGWVR6_EL1},
  {"dbgwvr7_el1", DBGWVR7_EL1},
  {"dbgwvr8_el1", DBGWVR8_EL1},
  {"dbgwvr9_el1", DBGWVR9_EL1},
  {"dbgwvr10_el1", DBGWVR10_EL1},
  {"dbgwvr11_el1", DBGWVR11_EL1},
  {"dbgwvr12_el1", DBGWVR12_EL1},
  {"dbgwvr13_el1", DBGWVR13_EL1},
  {"dbgwvr14_el1", DBGWVR14_EL1},
  {"dbgwvr15_el1", DBGWVR15_EL1},
  {"dbgwcr0_el1", DBGWCR0_EL1},
  {"dbgwcr1_el1", DBGWCR1_EL1},
  {"dbgwcr2_el1", DBGWCR2_EL1},
  {"dbgwcr3_el1", DBGWCR3_EL1},
  {"dbgwcr4_el1", DBGWCR4_EL1},
  {"dbgwcr5_el1", DBGWCR5_EL1},
  {"dbgwcr6_el1", DBGWCR6_EL1},
  {"dbgwcr7_el1", DBGWCR7_EL1},
  {"dbgwcr8_el1", DBGWCR8_EL1},
  {"dbgwcr9_el1", DBGWCR9_EL1},
  {"dbgwcr10_el1", DBGWCR10_EL1},
  {"dbgwcr11_el1", DBGWCR11_EL1},
  {"dbgwcr12_el1", DBGWCR12_EL1},
  {"dbgwcr13_el1", DBGWCR13_EL1},
  {"dbgwcr14_el1", DBGWCR14_EL1},
  {"dbgwcr15_el1", DBGWCR15_EL1},
  {"teehbr32_el1", TEEHBR32_EL1},
  {"osdlr_el1", OSDLR_EL1},
  {"dbgprcr_el1", DBGPRCR_EL1},
  {"dbgclaimset_el1", DBGCLAIMSET_EL1},
  {"dbgclaimclr_el1", DBGCLAIMCLR_EL1},
  {"csselr_el1", CSSELR_EL1},
  {"vpidr_el2", VPIDR_EL2},
  {"vmpidr_el2", VMPIDR_EL2},
  // System control and trap/configuration registers.
  {"sctlr_el1", SCTLR_EL1},
  {"sctlr_el2", SCTLR_EL2},
  {"sctlr_el3", SCTLR_EL3},
  {"actlr_el1", ACTLR_EL1},
  {"actlr_el2", ACTLR_EL2},
  {"actlr_el3", ACTLR_EL3},
  {"cpacr_el1", CPACR_EL1},
  {"hcr_el2", HCR_EL2},
  {"scr_el3", SCR_EL3},
  {"mdcr_el2", MDCR_EL2},
  {"sder32_el3", SDER32_EL3},
  {"cptr_el2", CPTR_EL2},
  {"cptr_el3", CPTR_EL3},
  {"hstr_el2", HSTR_EL2},
  {"hacr_el2", HACR_EL2},
  {"mdcr_el3", MDCR_EL3},
  // Translation-table base and control registers.
  {"ttbr0_el1", TTBR0_EL1},
  {"ttbr0_el2", TTBR0_EL2},
  {"ttbr0_el3", TTBR0_EL3},
  {"ttbr1_el1", TTBR1_EL1},
  {"tcr_el1", TCR_EL1},
  {"tcr_el2", TCR_EL2},
  {"tcr_el3", TCR_EL3},
  {"vttbr_el2", VTTBR_EL2},
  {"vtcr_el2", VTCR_EL2},
  {"dacr32_el2", DACR32_EL2},
  // Exception state: saved program status and return addresses.
  {"spsr_el1", SPSR_EL1},
  {"spsr_el2", SPSR_EL2},
  {"spsr_el3", SPSR_EL3},
  {"elr_el1", ELR_EL1},
  {"elr_el2", ELR_EL2},
  {"elr_el3", ELR_EL3},
  {"currentel", CurrentEL},
  {"spsr_irq", SPSR_irq},
  {"spsr_abt", SPSR_abt},
  {"spsr_und", SPSR_und},
  {"spsr_fiq", SPSR_fiq},
  {"dspsr_el0", DSPSR_EL0},
  {"dlr_el0", DLR_EL0},
  // Fault status/address registers.
  {"ifsr32_el2", IFSR32_EL2},
  {"afsr0_el1", AFSR0_EL1},
  {"afsr0_el2", AFSR0_EL2},
  {"afsr0_el3", AFSR0_EL3},
  {"afsr1_el1", AFSR1_EL1},
  {"afsr1_el2", AFSR1_EL2},
  {"afsr1_el3", AFSR1_EL3},
  {"esr_el1", ESR_EL1},
  {"esr_el2", ESR_EL2},
  {"esr_el3", ESR_EL3},
  {"fpexc32_el2", FPEXC32_EL2},
  {"far_el1", FAR_EL1},
  {"far_el2", FAR_EL2},
  {"far_el3", FAR_EL3},
  {"hpfar_el2", HPFAR_EL2},
  {"par_el1", PAR_EL1},
  // Performance-monitor control registers.
  {"pmcr_el0", PMCR_EL0},
  {"pmcntenset_el0", PMCNTENSET_EL0},
  {"pmcntenclr_el0", PMCNTENCLR_EL0},
  {"pmovsclr_el0", PMOVSCLR_EL0},
  {"pmselr_el0", PMSELR_EL0},
  {"pmccntr_el0", PMCCNTR_EL0},
  {"pmxevtyper_el0", PMXEVTYPER_EL0},
  {"pmxevcntr_el0", PMXEVCNTR_EL0},
  {"pmuserenr_el0", PMUSERENR_EL0},
  {"pmintenset_el1", PMINTENSET_EL1},
  {"pmintenclr_el1", PMINTENCLR_EL1},
  {"pmovsset_el0", PMOVSSET_EL0},
  // Memory-attribute and vector-base registers.
  {"mair_el1", MAIR_EL1},
  {"mair_el2", MAIR_EL2},
  {"mair_el3", MAIR_EL3},
  {"amair_el1", AMAIR_EL1},
  {"amair_el2", AMAIR_EL2},
  {"amair_el3", AMAIR_EL3},
  {"vbar_el1", VBAR_EL1},
  {"vbar_el2", VBAR_EL2},
  {"vbar_el3", VBAR_EL3},
  {"rmr_el1", RMR_EL1},
  {"rmr_el2", RMR_EL2},
  {"rmr_el3", RMR_EL3},
  {"contextidr_el1", CONTEXTIDR_EL1},
  // Thread-pointer registers.
  {"tpidr_el0", TPIDR_EL0},
  {"tpidr_el2", TPIDR_EL2},
  {"tpidr_el3", TPIDR_EL3},
  {"tpidrro_el0", TPIDRRO_EL0},
  {"tpidr_el1", TPIDR_EL1},
  // Generic-timer registers.
  {"cntfrq_el0", CNTFRQ_EL0},
  {"cntvoff_el2", CNTVOFF_EL2},
  {"cntkctl_el1", CNTKCTL_EL1},
  {"cnthctl_el2", CNTHCTL_EL2},
  {"cntp_tval_el0", CNTP_TVAL_EL0},
  {"cnthp_tval_el2", CNTHP_TVAL_EL2},
  {"cntps_tval_el1", CNTPS_TVAL_EL1},
  {"cntp_ctl_el0", CNTP_CTL_EL0},
  {"cnthp_ctl_el2", CNTHP_CTL_EL2},
  {"cntps_ctl_el1", CNTPS_CTL_EL1},
  {"cntp_cval_el0", CNTP_CVAL_EL0},
  {"cnthp_cval_el2", CNTHP_CVAL_EL2},
  {"cntps_cval_el1", CNTPS_CVAL_EL1},
  {"cntv_tval_el0", CNTV_TVAL_EL0},
  {"cntv_ctl_el0", CNTV_CTL_EL0},
  {"cntv_cval_el0", CNTV_CVAL_EL0},
  // Performance-monitor event counter registers 0-30.
  {"pmevcntr0_el0", PMEVCNTR0_EL0},
  {"pmevcntr1_el0", PMEVCNTR1_EL0},
  {"pmevcntr2_el0", PMEVCNTR2_EL0},
  {"pmevcntr3_el0", PMEVCNTR3_EL0},
  {"pmevcntr4_el0", PMEVCNTR4_EL0},
  {"pmevcntr5_el0", PMEVCNTR5_EL0},
  {"pmevcntr6_el0", PMEVCNTR6_EL0},
  {"pmevcntr7_el0", PMEVCNTR7_EL0},
  {"pmevcntr8_el0", PMEVCNTR8_EL0},
  {"pmevcntr9_el0", PMEVCNTR9_EL0},
  {"pmevcntr10_el0", PMEVCNTR10_EL0},
  {"pmevcntr11_el0", PMEVCNTR11_EL0},
  {"pmevcntr12_el0", PMEVCNTR12_EL0},
  {"pmevcntr13_el0", PMEVCNTR13_EL0},
  {"pmevcntr14_el0", PMEVCNTR14_EL0},
  {"pmevcntr15_el0", PMEVCNTR15_EL0},
  {"pmevcntr16_el0", PMEVCNTR16_EL0},
  {"pmevcntr17_el0", PMEVCNTR17_EL0},
  {"pmevcntr18_el0", PMEVCNTR18_EL0},
  {"pmevcntr19_el0", PMEVCNTR19_EL0},
  {"pmevcntr20_el0", PMEVCNTR20_EL0},
  {"pmevcntr21_el0", PMEVCNTR21_EL0},
  {"pmevcntr22_el0", PMEVCNTR22_EL0},
  {"pmevcntr23_el0", PMEVCNTR23_EL0},
  {"pmevcntr24_el0", PMEVCNTR24_EL0},
  {"pmevcntr25_el0", PMEVCNTR25_EL0},
  {"pmevcntr26_el0", PMEVCNTR26_EL0},
  {"pmevcntr27_el0", PMEVCNTR27_EL0},
  {"pmevcntr28_el0", PMEVCNTR28_EL0},
  {"pmevcntr29_el0", PMEVCNTR29_EL0},
  {"pmevcntr30_el0", PMEVCNTR30_EL0},
  {"pmccfiltr_el0", PMCCFILTR_EL0},
  // Performance-monitor event type registers 0-30.
  {"pmevtyper0_el0", PMEVTYPER0_EL0},
  {"pmevtyper1_el0", PMEVTYPER1_EL0},
  {"pmevtyper2_el0", PMEVTYPER2_EL0},
  {"pmevtyper3_el0", PMEVTYPER3_EL0},
  {"pmevtyper4_el0", PMEVTYPER4_EL0},
  {"pmevtyper5_el0", PMEVTYPER5_EL0},
  {"pmevtyper6_el0", PMEVTYPER6_EL0},
  {"pmevtyper7_el0", PMEVTYPER7_EL0},
  {"pmevtyper8_el0", PMEVTYPER8_EL0},
  {"pmevtyper9_el0", PMEVTYPER9_EL0},
  {"pmevtyper10_el0", PMEVTYPER10_EL0},
  {"pmevtyper11_el0", PMEVTYPER11_EL0},
  {"pmevtyper12_el0", PMEVTYPER12_EL0},
  {"pmevtyper13_el0", PMEVTYPER13_EL0},
  {"pmevtyper14_el0", PMEVTYPER14_EL0},
  {"pmevtyper15_el0", PMEVTYPER15_EL0},
  {"pmevtyper16_el0", PMEVTYPER16_EL0},
  {"pmevtyper17_el0", PMEVTYPER17_EL0},
  {"pmevtyper18_el0", PMEVTYPER18_EL0},
  {"pmevtyper19_el0", PMEVTYPER19_EL0},
  {"pmevtyper20_el0", PMEVTYPER20_EL0},
  {"pmevtyper21_el0", PMEVTYPER21_EL0},
  {"pmevtyper22_el0", PMEVTYPER22_EL0},
  {"pmevtyper23_el0", PMEVTYPER23_EL0},
  {"pmevtyper24_el0", PMEVTYPER24_EL0},
  {"pmevtyper25_el0", PMEVTYPER25_EL0},
  {"pmevtyper26_el0", PMEVTYPER26_EL0},
  {"pmevtyper27_el0", PMEVTYPER27_EL0},
  {"pmevtyper28_el0", PMEVTYPER28_EL0},
  {"pmevtyper29_el0", PMEVTYPER29_EL0},
  {"pmevtyper30_el0", PMEVTYPER30_EL0},
// Parse a system-register name (lower-cased) into its 16-bit encoding.
// Lookup order: shared MRS+MSR table, then the instruction-specific table
// (MRS-only or MSR-only depending on the concrete mapper), and finally a
// generic "s3_<op1>_c<Cn>_c<Cm>_<op2>" spelling. Valid reports success.
A64SysReg::SysRegMapper::fromString(StringRef Name, bool &Valid) const {
  // First search the registers shared by all
  std::string NameLower = Name.lower();
  for (unsigned i = 0; i < array_lengthof(SysRegPairs); ++i) {
    if (SysRegPairs[i].Name == NameLower) {
      return SysRegPairs[i].Value;

  // Now try the instruction-specific registers (either read-only or
  for (unsigned i = 0; i < NumInstPairs; ++i) {
    if (InstPairs[i].Name == NameLower) {
      return InstPairs[i].Value;

  // Try to parse an S<op0>_<op1>_<Cn>_<Cm>_<op2> register name, where the bits
  // are: 11 xxx 1x11 xxxx xxx
  // The regex only admits op0 == 3 and CRn in {11, 15} ("1[15]"), matching
  // the implementation-defined encoding space.
  Regex GenericRegPattern("^s3_([0-7])_c(1[15])_c([0-9]|1[0-5])_([0-7])$");

  SmallVector<StringRef, 4> Ops;
  if (!GenericRegPattern.match(NameLower, &Ops)) {

  // Ops[0] is the whole match; Ops[1..4] are the captured fields.
  uint32_t Op0 = 3, Op1 = 0, CRn = 0, CRm = 0, Op2 = 0;

  Ops[1].getAsInteger(10, Op1);
  Ops[2].getAsInteger(10, CRn);
  Ops[3].getAsInteger(10, CRm);
  Ops[4].getAsInteger(10, Op2);
  // Pack op0:op1:CRn:CRm:op2 into the 16-bit encoding.
  Bits = (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2;
// Convert a system-register encoding back to a name: shared table first,
// then the instruction-specific table, then fall back to the generic
// "s3_<op1>_c<Cn>_c<Cm>_<op2>" spelling when the fields permit it.
A64SysReg::SysRegMapper::toString(uint32_t Bits, bool &Valid) const {
  for (unsigned i = 0; i < array_lengthof(SysRegPairs); ++i) {
    if (SysRegPairs[i].Value == Bits) {
      return SysRegPairs[i].Name;

  for (unsigned i = 0; i < NumInstPairs; ++i) {
    if (InstPairs[i].Value == Bits) {
      return InstPairs[i].Name;

  // Unpack op0:op1:CRn:CRm:op2 (the inverse of fromString's packing).
  uint32_t Op0 = (Bits >> 14) & 0x3;
  uint32_t Op1 = (Bits >> 11) & 0x7;
  uint32_t CRn = (Bits >> 7) & 0xf;
  uint32_t CRm = (Bits >> 3) & 0xf;
  uint32_t Op2 = Bits & 0x7;

  // Only combinations matching: 11 xxx 1x11 xxxx xxx are valid for a generic
  if (Op0 != 3 || (CRn != 11 && CRn != 15)) {

  assert(Op0 == 3 && (CRn == 11 || CRn == 15) && "Invalid generic sysreg");

  return "s3_" + utostr(Op1) + "_c" + utostr(CRn)
        + "_c" + utostr(CRm) + "_" + utostr(Op2);
// TLB-invalidate (TLBI) operation names -> encodings. The "is" suffix
// denotes the inner-shareable variants.
const NamedImmMapper::Mapping A64TLBI::TLBIMapper::TLBIPairs[] = {
  {"ipas2e1is", IPAS2E1IS},
  {"ipas2le1is", IPAS2LE1IS},
  {"vmalle1is", VMALLE1IS},
  {"alle2is", ALLE2IS},
  {"alle3is", ALLE3IS},
  {"aside1is", ASIDE1IS},
  {"vaae1is", VAAE1IS},
  {"alle1is", ALLE1IS},
  {"vale1is", VALE1IS},
  {"vale2is", VALE2IS},
  {"vale3is", VALE3IS},
  {"vmalls12e1is", VMALLS12E1IS},
  {"vaale1is", VAALE1IS},
  {"ipas2e1", IPAS2E1},
  {"ipas2le1", IPAS2LE1},
  {"vmalle1", VMALLE1},
  {"vmalls12e1", VMALLS12E1},
601 A64TLBI::TLBIMapper::TLBIMapper()
602 : NamedImmMapper(TLBIPairs, 0) {}
// Decide whether Val can be encoded as the 8-bit FP immediate used by FMOV
// (sign, 3-bit exponent, 4-bit fraction). On success the packed encoding is
// written to Imm8Bits.
bool A64Imms::isFPImm(const APFloat &Val, uint32_t &Imm8Bits) {
  const fltSemantics &Sem = Val.getSemantics();
  // Precision includes the implicit leading bit; FracBits is the explicit
  // fraction width (10/23/52/112 for half/single/double/quad).
  unsigned FracBits = APFloat::semanticsPrecision(Sem) - 1;
  case 10: // IEEE half-precision
  case 23: // IEEE single-precision
  case 52: // IEEE double-precision
  case 112: // IEEE quad-precision
    // No immediates are valid for quad precision.
    llvm_unreachable("Only half, single and double precision supported");
  uint32_t ExpStart = FracBits;
  uint64_t FracMask = (1ULL << FracBits) - 1;
  uint32_t Sign = Val.isNegative();
  // Decompose the raw bit pattern into sign/exponent/fraction, then unbias
  // the exponent (bias is ExpMask >> 1).
  uint64_t Bits= Val.bitcastToAPInt().getLimitedValue();
  uint64_t Fraction = Bits & FracMask;
  int32_t Exponent = ((Bits >> ExpStart) & ExpMask);
  Exponent -= ExpMask >> 1;
  // S[d] = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>, 5):imm8<5:0>:Zeros(19)
  // D[d] = imm8<7>:NOT(imm8<6>):Replicate(imm8<6>, 8):imm8<5:0>:Zeros(48)
  // This translates to: only 4 bits of fraction; -3 <= exp <= 4.
  uint64_t A64FracStart = FracBits - 4;
  uint64_t A64FracMask = 0xf;
  // Are there too many fraction bits?
  if (Fraction & ~(A64FracMask << A64FracStart))
  if (Exponent < -3 || Exponent > 4)
  // Pack: bit 7 = sign, bits 6-4 = biased exponent (+7), bits 3-0 = fraction.
  uint32_t PackedFraction = (Fraction >> A64FracStart) & A64FracMask;
  uint32_t PackedExp = (Exponent + 7) & 0x7;
  Imm8Bits = (Sign << 7) | (PackedExp << 4) | PackedFraction;
// Encoding of the immediate for logical (immediate) instructions:
// | N | imms | immr | size | R | S |
// |---+--------+--------+------+--------------+--------------|
// | 1 | ssssss | rrrrrr | 64 | UInt(rrrrrr) | UInt(ssssss) |
// | 0 | 0sssss | xrrrrr | 32 | UInt(rrrrr) | UInt(sssss) |
// | 0 | 10ssss | xxrrrr | 16 | UInt(rrrr) | UInt(ssss) |
// | 0 | 110sss | xxxrrr | 8 | UInt(rrr) | UInt(sss) |
// | 0 | 1110ss | xxxxrr | 4 | UInt(rr) | UInt(ss) |
// | 0 | 11110s | xxxxxr | 2 | UInt(r) | UInt(s) |
// | 0 | 11111x | - | | UNALLOCATED | |
// Columns 'R', 'S' and 'size' specify a "bitmask immediate" of size bits in
// which the lower S+1 bits are ones and the remaining bits are zero, then
// rotated right by R bits, which is then replicated across the datapath.
// + Values of 'N', 'imms' and 'immr' which do not match the above table are
// + If all 's' bits in the imms field are set then the instruction is
// + The 'x' bits in the 'immr' field are IGNORED.

// Decide whether Imm is encodable as a RegWidth-bit logical immediate;
// on success the packed N:immr:imms value is written to Bits.
bool A64Imms::isLogicalImm(unsigned RegWidth, uint64_t Imm, uint32_t &Bits) {
  // Because there are S+1 ones in the replicated mask, an immediate of all
  // zeros is not allowed. Filtering it here is probably more efficient.
  if (Imm == 0) return false;

  // Try every power-of-two element size from RegWidth down to 2.
  for (RepeatWidth = RegWidth; RepeatWidth > 1; RepeatWidth /= 2) {
    uint64_t RepeatMask = RepeatWidth == 64 ? -1 : (1ULL << RepeatWidth) - 1;
    uint64_t ReplicatedMask = Imm & RepeatMask;

    if (ReplicatedMask == 0) continue;

    // First we have to make sure the mask is actually repeated in each slot for
    // this width-specifier.
    bool IsReplicatedMask = true;
    for (unsigned i = RepeatWidth; i < RegWidth; i += RepeatWidth) {
      if (((Imm >> i) & RepeatMask) != ReplicatedMask) {
        IsReplicatedMask = false;

    if (!IsReplicatedMask) continue;

    // Now we have to work out the amount of rotation needed. The first part of
    // this calculation is actually independent of RepeatWidth, but the complex
    // case will depend on it.
    Rotation = CountTrailingZeros_64(Imm);

    // There were no leading zeros, which means it's either in place or there
    // are 1s at each end (e.g. 0x8003 needs rotating).
    Rotation = RegWidth == 64 ? CountLeadingOnes_64(Imm)
                              : CountLeadingOnes_32(Imm);
    Rotation = RepeatWidth - Rotation;

    // Un-rotate one element and check it really is a contiguous run of ones.
    uint64_t ReplicatedOnes = (ReplicatedMask >> Rotation)
        | ((ReplicatedMask << (RepeatWidth - Rotation)) & RepeatMask);
    // Of course, they may not actually be ones, so we have to check that:
    if (!isMask_64(ReplicatedOnes))

    Num1s = CountTrailingOnes_64(ReplicatedOnes);

    // We know we've got an almost valid encoding (certainly, if this is invalid
    // no other parameters would work).

  // The encodings which would produce all 1s are RESERVED.
  if (RepeatWidth == 1 || Num1s == RepeatWidth) return false;

  // Pack the encoding: N only for 64-bit elements, immr = rotation,
  // imms = (ones - 1) with the width marker prefix from the table above.
  uint32_t N = RepeatWidth == 64;
  uint32_t ImmR = RepeatWidth - Rotation;
  uint32_t ImmS = Num1s - 1;

  switch (RepeatWidth) {
  default: break; // No action required for other valid rotations.
  case 16: ImmS |= 0x20; break; // 10ssss
  case 8: ImmS |= 0x30; break; // 110sss
  case 4: ImmS |= 0x38; break; // 1110ss
  case 2: ImmS |= 0x3c; break; // 11110s

  Bits = ImmS | (ImmR << 6) | (N << 12);
// Decode a packed N:immr:imms logical-immediate encoding into the value it
// represents at RegWidth bits, writing the result to Imm. Returns false for
// the RESERVED/UNALLOCATED encodings described in the table above.
bool A64Imms::isLogicalImmBits(unsigned RegWidth, uint32_t Bits, uint64_t &Imm) {
  uint32_t N = Bits >> 12;
  uint32_t ImmR = (Bits >> 6) & 0x3f;
  uint32_t ImmS = Bits & 0x3f;

  // N=1 encodes a 64-bit replication and is invalid for the 32-bit
  if (RegWidth == 32 && N != 0) return false;

  // Recover the element width from the leading-ones prefix of imms
  // (see the encoding table above).
  else if ((ImmS & 0x20) == 0)
  else if ((ImmS & 0x10) == 0)
  else if ((ImmS & 0x08) == 0)
  else if ((ImmS & 0x04) == 0)
  else if ((ImmS & 0x02) == 0)

    // ImmS is 0b11111x: UNALLOCATED

  int Num1s = (ImmS & (Width - 1)) + 1;

  // All encodings which would map to -1 (signed) are RESERVED.
  if (Num1s == Width) return false;

  // Build one element: Num1s low ones rotated right by Rotation within Width.
  int Rotation = (ImmR & (Width - 1));
  uint64_t Mask = (1ULL << Num1s) - 1;
  uint64_t WidthMask = Width == 64 ? -1 : (1ULL << Width) - 1;
  Mask = (Mask >> Rotation)
      | ((Mask << (Width - Rotation)) & WidthMask);

  // Replicate the element across the full register width.
  for (unsigned i = 0; i < RegWidth / Width; ++i) {
// Decide whether Value is representable as MOVZ(UImm16 LSL Shift*16) at the
// given register width; on success the 16-bit chunk and shift are returned
// through UImm16/Shift.
bool A64Imms::isMOVZImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift) {
  // If high bits are set then a 32-bit MOVZ can't possibly work.
  if (RegWidth == 32 && (Value & ~0xffffffffULL))

  // Test each 16-bit-aligned position for a single non-zero chunk.
  for (int i = 0; i < RegWidth; i += 16) {
    // If the value is 0 when we mask out all the bits that could be set with
    // the current LSL value then it's representable.
    if ((Value & ~(0xffffULL << i)) == 0) {
      UImm16 = (Value >> i) & 0xffff;
// Decide whether Value is representable as MOVN(UImm16 LSL Shift*16), i.e.
// whether its bitwise complement (at RegWidth) is a valid MOVZ immediate.
bool A64Imms::isMOVNImm(int RegWidth, uint64_t Value, int &UImm16, int &Shift) {
  // MOVN is defined to set its register to NOT(LSL(imm16, shift)).
  // We have to be a little careful about a 32-bit register: 0xffff_1234 *is*
  // representable, but ~0xffff_1234 == 0xffff_ffff_0000_edcb which is not
  // a valid input for isMOVZImm.
  if (RegWidth == 32 && (Value & ~0xffffffffULL))

  // Complement within the register width only, then reuse the MOVZ check.
  uint64_t MOVZEquivalent = RegWidth == 32 ? ~Value & 0xffffffff : ~Value;

  return isMOVZImm(RegWidth, MOVZEquivalent, UImm16, Shift);
// True when Value needs MOVN specifically: it is a valid MOVN immediate and
// is NOT already representable by MOVZ (MOVZ takes priority when both work).
bool A64Imms::isOnlyMOVNImm(int RegWidth, uint64_t Value,
                            int &UImm16, int &Shift) {
  if (isMOVZImm(RegWidth, Value, UImm16, Shift))

  return isMOVNImm(RegWidth, Value, UImm16, Shift);
// Factory for the AArch64 MCSubtargetInfo, registered with the
// TargetRegistry below. Initialised with an empty feature string.
MCSubtargetInfo *AArch64_MC::createAArch64MCSubtargetInfo(StringRef TT,
  MCSubtargetInfo *X = new MCSubtargetInfo();
  InitAArch64MCSubtargetInfo(X, TT, CPU, "");
// Factory for the tablegen-initialised AArch64 MCInstrInfo.
static MCInstrInfo *createAArch64MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitAArch64MCInstrInfo(X);
// Factory for the tablegen-initialised AArch64 MCRegisterInfo; X30 (the
// link register) is passed as the return-address register.
static MCRegisterInfo *createAArch64MCRegisterInfo(StringRef Triple) {
  MCRegisterInfo *X = new MCRegisterInfo();
  InitAArch64MCRegisterInfo(X, AArch64::X30);
// Factory for the AArch64 ELF MCAsmInfo. Also seeds the initial CFI frame
// state: the virtual frame pointer is defined as XSP + 0 on entry.
static MCAsmInfo *createAArch64MCAsmInfo(const Target &T, StringRef TT) {
  Triple TheTriple(TT);
  MCAsmInfo *MAI = new AArch64ELFMCAsmInfo();
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(AArch64::XSP, 0);
  MAI->addInitialFrameState(0, Dst, Src);
// Factory for the AArch64 MCCodeGenInfo: resolves Default/DynamicNoPIC
// relocation models and defaults the code model to Small.
static MCCodeGenInfo *createAArch64MCCodeGenInfo(StringRef TT, Reloc::Model RM,
                                                 CodeGenOpt::Level OL) {
  MCCodeGenInfo *X = new MCCodeGenInfo();
  if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC) {
    // On ELF platforms the default static relocation model has a smart enough
    // linker to cope with referencing external symbols defined in a shared
    // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.

  if (CM == CodeModel::Default)
    CM = CodeModel::Small;

  X->InitMCCodeGenInfo(RM, CM, OL);
// Factory for the object-file streamer; always produces an ELF streamer
// (the only object format handled here).
static MCStreamer *createMCStreamer(const Target &T, StringRef TT,
                                    MCContext &Ctx, MCAsmBackend &MAB,
                                    MCCodeEmitter *Emitter,
  Triple TheTriple(TT);

  return createAArch64ELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
// Factory for the instruction printer. Only syntax variant 0 (the standard
// AArch64 assembly syntax) is supported.
static MCInstPrinter *createAArch64MCInstPrinter(const Target &T,
                                                 unsigned SyntaxVariant,
                                                 const MCAsmInfo &MAI,
                                                 const MCInstrInfo &MII,
                                                 const MCRegisterInfo &MRI,
                                                 const MCSubtargetInfo &STI) {
  if (SyntaxVariant == 0)
    return new AArch64InstPrinter(MAI, MII, MRI, STI);
// AArch64-specific MCInstrAnalysis: treats "B.AL" (Bcc with the always
// condition) as an unconditional rather than conditional branch, and can
// evaluate PC-relative branch targets.
class AArch64MCInstrAnalysis : public MCInstrAnalysis {
  AArch64MCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {}

  // A Bcc with the AL condition behaves unconditionally; otherwise defer
  // to the generic tablegen-derived classification.
  virtual bool isUnconditionalBranch(const MCInst &Inst) const {
    if (Inst.getOpcode() == AArch64::Bcc
        && Inst.getOperand(0).getImm() == A64CC::AL)
    return MCInstrAnalysis::isUnconditionalBranch(Inst);

  // Mirror image of the above: Bcc+AL is excluded from the conditional set.
  virtual bool isConditionalBranch(const MCInst &Inst) const {
    if (Inst.getOpcode() == AArch64::Bcc
        && Inst.getOperand(0).getImm() == A64CC::AL)
    return MCInstrAnalysis::isConditionalBranch(Inst);

  // Compute a branch target from Addr. The label is operand 1 for Bcc
  // (operand 0 is the condition) and operand 0 otherwise.
  uint64_t evaluateBranch(const MCInst &Inst, uint64_t Addr,
                          uint64_t Size) const {
    unsigned LblOperand = Inst.getOpcode() == AArch64::Bcc ? 1 : 0;
    // FIXME: We only handle PCRel branches for now.
    if (Info->get(Inst.getOpcode()).OpInfo[LblOperand].OperandType
        != MCOI::OPERAND_PCREL)

    int64_t Imm = Inst.getOperand(LblOperand).getImm();
// Factory for the analyzer above, registered with the TargetRegistry.
static MCInstrAnalysis *createAArch64MCInstrAnalysis(const MCInstrInfo *Info) {
  return new AArch64MCInstrAnalysis(Info);
// Entry point called by LLVM's target initialisation machinery: registers
// every MC-layer factory defined in this file with the TargetRegistry for
// TheAArch64Target.
extern "C" void LLVMInitializeAArch64TargetMC() {
  // Register the MC asm info.
  RegisterMCAsmInfoFn A(TheAArch64Target, createAArch64MCAsmInfo);

  // Register the MC codegen info.
  TargetRegistry::RegisterMCCodeGenInfo(TheAArch64Target,
                                        createAArch64MCCodeGenInfo);

  // Register the MC instruction info.
  TargetRegistry::RegisterMCInstrInfo(TheAArch64Target,
                                      createAArch64MCInstrInfo);

  // Register the MC register info.
  TargetRegistry::RegisterMCRegInfo(TheAArch64Target,
                                    createAArch64MCRegisterInfo);

  // Register the MC subtarget info.
  using AArch64_MC::createAArch64MCSubtargetInfo;
  TargetRegistry::RegisterMCSubtargetInfo(TheAArch64Target,
                                          createAArch64MCSubtargetInfo);

  // Register the MC instruction analyzer.
  TargetRegistry::RegisterMCInstrAnalysis(TheAArch64Target,
                                          createAArch64MCInstrAnalysis);

  // Register the MC Code Emitter
  TargetRegistry::RegisterMCCodeEmitter(TheAArch64Target,
                                        createAArch64MCCodeEmitter);

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(TheAArch64Target,
                                       createAArch64AsmBackend);

  // Register the object streamer.
  TargetRegistry::RegisterMCObjectStreamer(TheAArch64Target,
  // Register the MCInstPrinter.
  TargetRegistry::RegisterMCInstPrinter(TheAArch64Target,
                                        createAArch64MCInstPrinter);