1 //=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the AArch64MCCodeEmitter class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "mccodeemitter"
15 #include "MCTargetDesc/AArch64FixupKinds.h"
16 #include "MCTargetDesc/AArch64MCExpr.h"
17 #include "MCTargetDesc/AArch64MCTargetDesc.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/MC/MCCodeEmitter.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCInstrInfo.h"
23 #include "llvm/MC/MCRegisterInfo.h"
24 #include "llvm/MC/MCSubtargetInfo.h"
25 #include "llvm/Support/ErrorHandling.h"
26 #include "llvm/Support/raw_ostream.h"
// AArch64MCCodeEmitter - encodes AArch64 MCInsts into 32-bit A64 machine
// code words, recording MCFixups for operands that reference symbols so the
// assembler backend can resolve/relax them later.
// NOTE(review): several declaration lines (access specifiers, the MCContext
// &Ctx member referenced by the constructor initializer list, closing braces)
// are elided in this excerpt -- confirm against the full file.
31 class AArch64MCCodeEmitter : public MCCodeEmitter {
// Copying deliberately disabled (pre-C++11 "declare, don't implement" idiom).
32 AArch64MCCodeEmitter(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT
33 void operator=(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT
34 const MCInstrInfo &MCII;
35 const MCSubtargetInfo &STI;
39 AArch64MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
41 : MCII(mcii), STI(sti), Ctx(ctx) {
44 ~AArch64MCCodeEmitter() {}
// Encodes the 12-bit immediate (and optional shift) of an ADD/SUB-immediate
// operand, or records a :lo12:-style fixup when the operand is symbolic.
46 unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
47 SmallVectorImpl<MCFixup> &Fixups) const;
// Encodes the 21-bit page-relative label of an ADRP, selecting the fixup
// kind from the symbol modifier (:got:, :gottprel:, :tlsdesc:, ...).
49 unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
50 SmallVectorImpl<MCFixup> &Fixups) const;
// Templated-by-MemSize convenience wrapper: forwards to the general
// unsigned-12-bit-offset encoder below with the access size in bytes.
// NOTE(review): the template header declaring MemSize is elided in this view.
53 unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
54 SmallVectorImpl<MCFixup> &Fixups) const {
55 return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
// Encodes a scaled unsigned 12-bit load/store offset, or records the
// size-appropriate *_lo12 fixup for a symbolic offset.
58 unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
59 SmallVectorImpl<MCFixup> &Fixups,
// Encoders for the "LSL #n" alias of 32-/64-bit UBFM (bitfield) forms.
62 unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
63 SmallVectorImpl<MCFixup> &Fixups) const;
64 unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
65 SmallVectorImpl<MCFixup> &Fixups) const;
68 // Labels are handled mostly the same way: a symbol is needed, and
69 // just gets some fixup attached.
70 template<AArch64::Fixups fixupDesired>
71 unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
72 SmallVectorImpl<MCFixup> &Fixups) const;
// Label of a load-literal (LDR Rt, =label); GOTTPREL gets its own fixup.
74 unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
75 SmallVectorImpl<MCFixup> &Fixups) const;
// imm16 + hw-shift of MOVZ/MOVN/MOVK, with the full family of :abs_gN:/
// TLS movw fixups for symbolic immediates.
78 unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
79 SmallVectorImpl<MCFixup> &Fixups) const;
// Shared helper: attach a fixup of the given kind for a symbolic operand.
82 unsigned getAddressWithFixup(const MCOperand &MO,
84 SmallVectorImpl<MCFixup> &Fixups) const;
87 // getBinaryCodeForInstr - TableGen'erated function for getting the
88 // binary encoding for an instruction.
89 uint64_t getBinaryCodeForInstr(const MCInst &MI,
90 SmallVectorImpl<MCFixup> &Fixups) const;
92 /// getMachineOpValue - Return binary encoding of operand. If the machine
93 /// operand requires relocation, record the relocation and return zero.
94 unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
95 SmallVectorImpl<MCFixup> &Fixups) const;
98 void EmitByte(unsigned char C, raw_ostream &OS) const {
102 void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
103 // Output the constant in little endian byte order.
104 for (unsigned i = 0; i != 4; ++i) {
105 EmitByte(Val & 0xff, OS);
111 void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
112 SmallVectorImpl<MCFixup> &Fixups) const;
// Post-encoding fixers invoked from TableGen'erated code to force
// architecturally-ignored fields to their canonical values.
114 unsigned fixFCMPImm(const MCInst &MI, unsigned EncodedValue) const;
116 template<int hasRs, int hasRt2> unsigned
117 fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;
119 unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;
121 unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
126 } // end anonymous namespace
// getAddressWithFixup - Record a fixup of kind \p FixupKind (at offset 0 of
// the instruction) for the symbolic operand \p MO. For a plain immediate
// operand the value itself is returned instead.
// NOTE(review): the guard around the immediate case (presumably
// `if (!MO.isExpr())`) and the final `return 0;` for the fixup path are
// elided in this excerpt -- confirm against the full file.
128 unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
130 SmallVectorImpl<MCFixup> &Fixups) const {
132 // This can occur for manually decoded or constructed MCInsts, but neither
133 // the assembly-parser nor instruction selection will currently produce an
134 // MCInst that's not a symbol reference.
135 assert(MO.isImm() && "Unexpected address requested");
139 const MCExpr *Expr = MO.getExpr();
140 MCFixupKind Kind = MCFixupKind(FixupKind);
// Offset 0: the fixup applies to the whole 4-byte instruction word.
141 Fixups.push_back(MCFixup::Create(0, Expr, Kind));
// getOffsetUImm12OpValue - Encode the scaled unsigned 12-bit offset of a
// load/store whose access size is \p MemSize bytes. A literal immediate is
// returned directly (already scaled); a symbolic operand selects the
// relocation fixup matching both the symbol modifier (:lo12:, TLS variants,
// GOT) and the access size, since each size has its own *_lo12 relocation.
// NOTE(review): the per-case `break`s / closing braces and the MemSize
// parameter declaration are elided in this excerpt.
146 unsigned AArch64MCCodeEmitter::
147 getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
148 SmallVectorImpl<MCFixup> &Fixups,
150 const MCOperand &ImmOp = MI.getOperand(OpIdx);
152 return ImmOp.getImm();
154 assert(ImmOp.isExpr() && "Unexpected operand type");
155 const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
159 switch (Expr->getKind()) {
160 default: llvm_unreachable("Unexpected operand modifier");
161 case AArch64MCExpr::VK_AARCH64_LO12: {
// Indexed by Log2 of the access size: 1/2/4/8/16-byte loads/stores each
// have a distinct ldst*_lo12 fixup so the linker scales correctly.
162 unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
163 AArch64::fixup_a64_ldst16_lo12,
164 AArch64::fixup_a64_ldst32_lo12,
165 AArch64::fixup_a64_ldst64_lo12,
166 AArch64::fixup_a64_ldst128_lo12 };
167 assert(MemSize <= 16 && "Invalid fixup for operation");
168 FixupKind = FixupsBySize[Log2_32(MemSize)];
// GOT entries are 8 bytes, so only 64-bit loads may use :got_lo12:.
171 case AArch64MCExpr::VK_AARCH64_GOT_LO12:
172 assert(MemSize == 8 && "Invalid fixup for operation");
173 FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
// TLS local-dynamic (DTPREL) offsets: checked and unchecked (_NC) forms,
// sized tables up to 8 bytes (no 128-bit DTPREL relocation exists).
175 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
176 unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12,
177 AArch64::fixup_a64_ldst16_dtprel_lo12,
178 AArch64::fixup_a64_ldst32_dtprel_lo12,
179 AArch64::fixup_a64_ldst64_dtprel_lo12 };
180 assert(MemSize <= 8 && "Invalid fixup for operation");
181 FixupKind = FixupsBySize[Log2_32(MemSize)];
184 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
185 unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
186 AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
187 AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
188 AArch64::fixup_a64_ldst64_dtprel_lo12_nc };
189 assert(MemSize <= 8 && "Invalid fixup for operation");
190 FixupKind = FixupsBySize[Log2_32(MemSize)];
// Initial-exec TLS: the GOT slot holding the TP offset is 8 bytes.
193 case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
194 assert(MemSize == 8 && "Invalid fixup for operation");
195 FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
// Local-exec TLS (TPREL) offsets, checked and unchecked forms.
197 case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{
198 unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12,
199 AArch64::fixup_a64_ldst16_tprel_lo12,
200 AArch64::fixup_a64_ldst32_tprel_lo12,
201 AArch64::fixup_a64_ldst64_tprel_lo12 };
202 assert(MemSize <= 8 && "Invalid fixup for operation");
203 FixupKind = FixupsBySize[Log2_32(MemSize)];
206 case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
207 unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12_nc,
208 AArch64::fixup_a64_ldst16_tprel_lo12_nc,
209 AArch64::fixup_a64_ldst32_tprel_lo12_nc,
210 AArch64::fixup_a64_ldst64_tprel_lo12_nc };
211 assert(MemSize <= 8 && "Invalid fixup for operation");
212 FixupKind = FixupsBySize[Log2_32(MemSize)];
// TLS descriptor sequence: the descriptor function pointer load is 64-bit.
215 case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
216 assert(MemSize == 8 && "Invalid fixup for operation");
217 FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
// Delegate fixup creation; encoded field value is filled in at fixup time.
221 return getAddressWithFixup(ImmOp, FixupKind, Fixups);
// getAddSubImmOpValue - Encode the 12-bit immediate of an ADD/SUB-immediate
// instruction. Literal immediates pass through; symbolic operands map their
// modifier (:lo12:, TLS hi12/lo12 variants, :tlsdesc:) to the matching
// add-immediate fixup kind.
// NOTE(review): the case `break`s are visible but the early-return guard for
// the immediate path and the switch's closing brace are elided in this view.
225 AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
226 SmallVectorImpl<MCFixup> &Fixups) const {
227 const MCOperand &MO = MI.getOperand(OpIdx);
229 return static_cast<unsigned>(MO.getImm());
233 unsigned FixupKind = 0;
234 switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
235 default: llvm_unreachable("Invalid expression modifier");
236 case AArch64MCExpr::VK_AARCH64_LO12:
237 FixupKind = AArch64::fixup_a64_add_lo12; break;
238 case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
239 FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
240 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
241 FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
242 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
243 FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
244 case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
245 FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
246 case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
247 FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
248 case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
249 FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
250 case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
251 FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
254 return getAddressWithFixup(MO, FixupKind, Fixups);
// getAdrpLabelOpValue - Encode the page-relative label operand of an ADRP.
// A plain label gets the generic adr_prel_page fixup; :got:, :gottprel: and
// :tlsdesc: modifiers select their page-address fixups. An unrecognized
// modifier is a fatal error.
// NOTE(review): the `switch (Modifier)` header, per-case `break`s and the
// default label preceding the llvm_unreachable are elided in this excerpt.
258 AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
259 SmallVectorImpl<MCFixup> &Fixups) const {
261 const MCOperand &MO = MI.getOperand(OpIdx);
263 return static_cast<unsigned>(MO.getImm());
// A bare MCSymbolRefExpr (no AArch64 wrapper) means "no modifier".
267 unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
268 if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
269 Modifier = Expr->getKind();
271 unsigned FixupKind = 0;
273 case AArch64MCExpr::VK_AARCH64_None:
274 FixupKind = AArch64::fixup_a64_adr_prel_page;
276 case AArch64MCExpr::VK_AARCH64_GOT:
277 FixupKind = AArch64::fixup_a64_adr_prel_got_page;
279 case AArch64MCExpr::VK_AARCH64_GOTTPREL:
280 FixupKind = AArch64::fixup_a64_adr_gottprel_page;
282 case AArch64MCExpr::VK_AARCH64_TLSDESC:
283 FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
286 llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
289 return getAddressWithFixup(MO, FixupKind, Fixups);
// getBitfield32LSLOpValue - Encode the shift amount of the 32-bit "LSL #n"
// alias of UBFM: immr = (32 - n) mod 32 in the low 6 bits, imms = 31 - n
// starting at bit 6 (per the UBFM alias rules in the A64 ISA).
293 AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
294 SmallVectorImpl<MCFixup> &Fixups) const {
296 const MCOperand &MO = MI.getOperand(OpIdx);
297 assert(MO.isImm() && "Only immediate expected for shift");
299 return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
// getBitfield64LSLOpValue - 64-bit counterpart of the above: immr =
// (64 - n) mod 64 in the low 6 bits, imms = 63 - n starting at bit 6.
303 AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
304 SmallVectorImpl<MCFixup> &Fixups) const {
306 const MCOperand &MO = MI.getOperand(OpIdx);
307 assert(MO.isImm() && "Only immediate expected for shift");
309 return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
// getLabelOpValue - Generic label encoder: the fixup kind is baked in as the
// template argument, so each label-taking instruction class instantiates its
// own variant. Symbolic operands get that fixup attached.
// NOTE(review): the immediate-operand early return appears elided between the
// operand fetch and the fixup call in this excerpt.
313 template<AArch64::Fixups fixupDesired> unsigned
314 AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
316 SmallVectorImpl<MCFixup> &Fixups) const {
317 const MCOperand &MO = MI.getOperand(OpIdx);
320 return getAddressWithFixup(MO, fixupDesired, Fixups);
// getLoadLitLabelOpValue - Encode the label of a load-literal instruction.
// The only symbol modifier permitted here is :gottprel: (initial-exec TLS),
// which uses its dedicated 19-bit PC-relative fixup; everything else takes
// the plain ld_prel fixup.
// NOTE(review): the FixupKind declaration and the else-branch structure
// around lines 343-344 are partially elided in this excerpt.
327 AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
329 SmallVectorImpl<MCFixup> &Fixups) const {
330 const MCOperand &MO = MI.getOperand(OpIdx);
338 if (isa<AArch64MCExpr>(MO.getExpr())) {
339 assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
340 == AArch64MCExpr::VK_AARCH64_GOTTPREL
341 && "Invalid symbol modifier for literal load");
342 FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
344 FixupKind = AArch64::fixup_a64_ld_prel;
347 return getAddressWithFixup(MO, FixupKind, Fixups);
// getMachineOpValue - Default operand encoder used by TableGen'erated code:
// registers map to their hardware encoding via the register info, immediates
// pass through; any other operand kind (e.g. an expression reaching here) is
// a bug in the operand's custom-encoder wiring.
352 AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
354 SmallVectorImpl<MCFixup> &Fixups) const {
356 return Ctx.getRegisterInfo().getEncodingValue(MO.getReg());
357 } else if (MO.isImm()) {
358 return static_cast<unsigned>(MO.getImm());
361 llvm_unreachable("Unable to encode MCOperand!");
// getMoveWideImmOpValue - Encode the combined imm16 + hw-shift operand pair
// of a move-wide instruction (MOVZ/MOVN/MOVK). The shift operand (always a
// literal) lands in bits 16+ of the result; the 16-bit immediate is either a
// literal OR'd into the low bits or a symbolic value whose :abs_gN[_nc]:,
// signed-absolute, DTPREL/TPREL/GOTTPREL "gN" modifier picks the movw fixup.
// NOTE(review): the `if (UImm16MO.isExpr())` branch structure around the
// literal/symbolic split is partially elided in this excerpt.
366 AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
367 SmallVectorImpl<MCFixup> &Fixups) const {
368 const MCOperand &UImm16MO = MI.getOperand(OpIdx);
// Shift is the operand immediately after the immediate.
369 const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);
371 unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;
373 if (UImm16MO.isImm()) {
374 Result |= UImm16MO.getImm();
378 const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
379 AArch64::Fixups requestedFixup;
380 switch (A64E->getKind()) {
381 default: llvm_unreachable("unexpected expression modifier");
// Unsigned absolute halfword selectors g0..g3 (+ no-check variants).
382 case AArch64MCExpr::VK_AARCH64_ABS_G0:
383 requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
384 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
385 requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
386 case AArch64MCExpr::VK_AARCH64_ABS_G1:
387 requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
388 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
389 requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
390 case AArch64MCExpr::VK_AARCH64_ABS_G2:
391 requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
392 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
393 requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
394 case AArch64MCExpr::VK_AARCH64_ABS_G3:
395 requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
// Signed absolute selectors (may flip MOVZ<->MOVN; see fixMOVZ below).
396 case AArch64MCExpr::VK_AARCH64_SABS_G0:
397 requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
398 case AArch64MCExpr::VK_AARCH64_SABS_G1:
399 requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
400 case AArch64MCExpr::VK_AARCH64_SABS_G2:
401 requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
// TLS local-dynamic (DTPREL) halfword selectors.
402 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
403 requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
404 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
405 requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
406 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
407 requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
408 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
409 requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
410 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
411 requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
// Initial-exec TLS (GOTTPREL) halfword selectors.
412 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
413 requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
414 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
415 requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
// Local-exec TLS (TPREL) halfword selectors.
416 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
417 requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
418 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
419 requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
420 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
421 requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
422 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
423 requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
424 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
425 requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
// Shift bits are kept; imm16 field comes from the fixup at resolution time.
428 return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
// fixFCMPImm - Post-encoding fixer for FCMP[E] Rn, #0.0: force the Rm field
// (bits 16-20) to zero, its canonical form.
// NOTE(review): the `return EncodedValue;` line is elided in this excerpt.
431 unsigned AArch64MCCodeEmitter::fixFCMPImm(const MCInst &MI,
432 unsigned EncodedValue) const {
433 // For FCMP[E] Rn, #0.0, the Rm field has a canonical representation
434 // with 0s, but is architecturally ignored
435 EncodedValue &= ~0x1f0000u;
// fixLoadStoreExclusive - Post-encoding fixer for load/store-exclusive
// instructions: when an instruction form has no status register (Rs, bits
// 16-20) or no second transfer register (Rt2, bits 10-14), the unused field
// is set to all ones, its canonical value.
// NOTE(review): the `return EncodedValue;` line is elided in this excerpt.
440 template<int hasRs, int hasRt2> unsigned
441 AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
442 unsigned EncodedValue) const {
443 if (!hasRs) EncodedValue |= 0x001F0000;
444 if (!hasRt2) EncodedValue |= 0x00007C00;
// fixMOVZ - Post-encoding fixer for MOVZ with a symbolic imm16. See the
// comment below: signed-capable relocations may resolve to MOVN, so bit 30
// must be pre-cleared for those fixup kinds.
// NOTE(review): this excerpt elides the `return EncodedValue;` for the
// immediate case and the default-case return before the llvm_unreachable.
450 AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
451 // If one of the signed fixup kinds is applied to a MOVZ instruction, the
452 // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
453 // job to ensure that any bits possibly affected by this are 0. This means we
454 // must zero out bit 30 (essentially emitting a MOVN).
455 MCOperand UImm16MO = MI.getOperand(1);
457 // Nothing to do if there's no fixup.
458 if (UImm16MO.isImm())
461 const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
462 switch (A64E->getKind()) {
// These modifiers produce relocations with a sign choice, i.e. the linker
// may rewrite the instruction as MOVN -> clear bit 30 now.
463 case AArch64MCExpr::VK_AARCH64_SABS_G0:
464 case AArch64MCExpr::VK_AARCH64_SABS_G1:
465 case AArch64MCExpr::VK_AARCH64_SABS_G2:
466 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
467 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
468 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
469 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
470 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
471 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
472 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
473 return EncodedValue & ~(1u << 30);
475 // Nothing to do for an unsigned fixup.
479 llvm_unreachable("Should have returned by now");
// fixMulHigh - Post-encoding fixer for SMULH/UMULH: force the unused Ra
// field (bits 10-14) to its canonical all-ones value (31).
// NOTE(review): the `return EncodedValue;` line is elided in this excerpt.
483 AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
484 unsigned EncodedValue) const {
485 // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
486 // (i.e. all bits 1) but is ignored by the processor.
487 EncodedValue |= 0x1f << 10;
// createAArch64MCCodeEmitter - Target-registry factory hook. Note MRI is
// accepted for interface compatibility but not forwarded; the emitter reads
// register info through the MCContext instead.
491 MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
492 const MCRegisterInfo &MRI,
493 const MCSubtargetInfo &STI,
495 return new AArch64MCCodeEmitter(MCII, STI, Ctx);
// EncodeInstruction - Main entry point: encode MI to 4 little-endian bytes
// on OS, appending any relocation fixups to Fixups. The TLSDESCCALL pseudo
// is special-cased: it contributes a fixup but zero bytes of output.
// NOTE(review): the `return;` ending the TLSDESCCALL branch and the `Expr`
// declaration appear elided in this excerpt.
498 void AArch64MCCodeEmitter::
499 EncodeInstruction(const MCInst &MI, raw_ostream &OS,
500 SmallVectorImpl<MCFixup> &Fixups) const {
501 if (MI.getOpcode() == AArch64::TLSDESCCALL) {
502 // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
503 // following (BLR) instruction. It doesn't emit any code itself so it
504 // doesn't go through the normal TableGenerated channels.
505 MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
507 Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
508 Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
// Normal path: TableGen'erated encoder, then write the 32-bit word.
512 uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
514 EmitInstruction(Binary, OS);
518 #include "AArch64GenMCCodeEmitter.inc"