1 //=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the AArch64MCCodeEmitter class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "mccodeemitter"
15 #include "MCTargetDesc/AArch64FixupKinds.h"
16 #include "MCTargetDesc/AArch64MCExpr.h"
17 #include "MCTargetDesc/AArch64MCTargetDesc.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/MC/MCCodeEmitter.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCInstrInfo.h"
23 #include "llvm/MC/MCRegisterInfo.h"
24 #include "llvm/MC/MCSubtargetInfo.h"
25 #include "llvm/Support/ErrorHandling.h"
26 #include "llvm/Support/raw_ostream.h"
// Emits AArch64 machine code: encodes MCInsts into 32-bit instruction words
// and records MCFixups for symbolic (relocatable) operands.
// NOTE(review): the embedded original line numbers are non-contiguous, so
// access specifiers and some member/brace lines are elided from this view.
31 class AArch64MCCodeEmitter : public MCCodeEmitter {
// Non-copyable (LLVM_DELETED_FUNCTION is the pre-C++11 '= delete' macro).
32 AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
33 void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
// Ctx supplies register info and is used to build fixup expressions.
37 AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}
39 ~AArch64MCCodeEmitter() {}
// Encode an add/sub 12-bit immediate operand, attaching an ADD-class
// :lo12:-style fixup when the operand is a symbolic expression.
41 unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
42 SmallVectorImpl<MCFixup> &Fixups) const;
// Encode the page-label operand of an ADRP instruction.
44 unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
45 SmallVectorImpl<MCFixup> &Fixups) const;
// Convenience overload: forwards to the MemSize-taking overload below.
// NOTE(review): MemSize is not declared on the visible lines; presumably a
// template parameter on an elided line -- confirm against the full source.
48 unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
49 SmallVectorImpl<MCFixup> &Fixups) const {
50 return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
// Encode a scaled unsigned 12-bit load/store offset; MemSize (access size
// in bytes) selects the :lo12: fixup variant for symbolic operands.
53 unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
54 SmallVectorImpl<MCFixup> &Fixups,
// Encode the shift amount of a 32-bit "LSL #imm" alias into UBFM fields.
57 unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
58 SmallVectorImpl<MCFixup> &Fixups) const;
// Encode the shift amount of a 64-bit "LSL #imm" alias into UBFM fields.
59 unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
60 SmallVectorImpl<MCFixup> &Fixups) const;
63 // Labels are handled mostly the same way: a symbol is needed, and
64 // just gets some fixup attached.
65 template<AArch64::Fixups fixupDesired>
66 unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
67 SmallVectorImpl<MCFixup> &Fixups) const;
// Encode the pc-relative label of a load-literal (LDR Xt, =label) form.
69 unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
70 SmallVectorImpl<MCFixup> &Fixups) const;
// Encode the combined imm16 + shift operand pair of MOVZ/MOVN/MOVK.
73 unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
74 SmallVectorImpl<MCFixup> &Fixups) const;
// Record a fixup of the given kind for a symbolic address operand; shared
// helper used by all the label/immediate encoders above.
77 unsigned getAddressWithFixup(const MCOperand &MO,
79 SmallVectorImpl<MCFixup> &Fixups) const;
82 // getBinaryCodeForInstr - TableGen'erated function for getting the
83 // binary encoding for an instruction.
84 uint64_t getBinaryCodeForInstr(const MCInst &MI,
85 SmallVectorImpl<MCFixup> &Fixups) const;
87 /// getMachineOpValue - Return binary encoding of operand. If the machine
88 /// operand requires relocation, record the relocation and return zero.
89 unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
90 SmallVectorImpl<MCFixup> &Fixups) const;
// Write one raw byte to the output stream.
93 void EmitByte(unsigned char C, raw_ostream &OS) const {
// Write a 32-bit instruction word to the stream, low byte first.
97 void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
98 // Output the constant in little endian byte order.
99 for (unsigned i = 0; i != 4; ++i) {
100 EmitByte(Val & 0xff, OS);
// MCCodeEmitter entry point: encode MI and append any fixups it needs.
106 void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
107 SmallVectorImpl<MCFixup> &Fixups) const;
// Post-encoding hooks (invoked from TableGen'erated code):
// force the unused Rs/Rt2 fields of load/store-exclusive forms to ones.
109 template<int hasRs, int hasRt2> unsigned
110 fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;
// Clear bit 30 of a MOVZ whose operand carries a signed MOVW fixup (the
// relocation may resolve it to a MOVN).
112 unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;
// Set the unused Ra field of SMULH/UMULH to all-ones.
114 unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
119 } // end anonymous namespace
// Shared helper: for a symbolic address operand, record a fixup of kind
// FixupKind against the operand's expression; immediates pass through.
121 unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
123 SmallVectorImpl<MCFixup> &Fixups) const {
// NOTE(review): the assert below is presumably inside an elided
// "if (!MO.isExpr())" guard, since MO.getExpr() is dereferenced later.
125 // This can occur for manually decoded or constructed MCInsts, but neither
126 // the assembly-parser nor instruction selection will currently produce an
127 // MCInst that's not a symbol reference.
128 assert(MO.isImm() && "Unexpected address requested");
// Attach the fixup at offset 0; the real bits are patched in at
// relaxation / relocation time, so the encoded field stays zero here.
132 const MCExpr *Expr = MO.getExpr();
133 MCFixupKind Kind = MCFixupKind(FixupKind);
134 Fixups.push_back(MCFixup::Create(0, Expr, Kind));
// Encode the unsigned 12-bit, MemSize-scaled offset of a load/store.
// Plain immediates are returned unchanged; symbolic operands select a
// :lo12:-family fixup based on both the modifier and the access size.
// NOTE(review): the original interleaving lines (break statements, closing
// braces) are elided from this view; each case is assumed to end in a
// break -- confirm against the full source.
139 unsigned AArch64MCCodeEmitter::
140 getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
141 SmallVectorImpl<MCFixup> &Fixups,
143 const MCOperand &ImmOp = MI.getOperand(OpIdx);
145 return ImmOp.getImm();
147 assert(ImmOp.isExpr() && "Unexpected operand type");
148 const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
152 switch (Expr->getKind()) {
153 default: llvm_unreachable("Unexpected operand modifier");
// Plain :lo12: -- fixup indexed by log2 of the access size (1..16 bytes).
154 case AArch64MCExpr::VK_AARCH64_LO12: {
155 static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
156 AArch64::fixup_a64_ldst16_lo12,
157 AArch64::fixup_a64_ldst32_lo12,
158 AArch64::fixup_a64_ldst64_lo12,
159 AArch64::fixup_a64_ldst128_lo12 };
160 assert(MemSize <= 16 && "Invalid fixup for operation");
161 FixupKind = FixupsBySize[Log2_32(MemSize)];
// :got_lo12: -- GOT entries are 8 bytes, so only a 64-bit access is valid.
164 case AArch64MCExpr::VK_AARCH64_GOT_LO12:
165 assert(MemSize == 8 && "Invalid fixup for operation");
166 FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
// :dtprel_lo12: -- local-dynamic TLS offset, overflow-checked variants.
168 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
169 static const unsigned FixupsBySize[] = {
170 AArch64::fixup_a64_ldst8_dtprel_lo12,
171 AArch64::fixup_a64_ldst16_dtprel_lo12,
172 AArch64::fixup_a64_ldst32_dtprel_lo12,
173 AArch64::fixup_a64_ldst64_dtprel_lo12
175 assert(MemSize <= 8 && "Invalid fixup for operation");
176 FixupKind = FixupsBySize[Log2_32(MemSize)];
// :dtprel_lo12_nc: -- same as above but no overflow check (_nc).
179 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
180 static const unsigned FixupsBySize[] = {
181 AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
182 AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
183 AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
184 AArch64::fixup_a64_ldst64_dtprel_lo12_nc
186 assert(MemSize <= 8 && "Invalid fixup for operation");
187 FixupKind = FixupsBySize[Log2_32(MemSize)];
// :gottprel_lo12: -- initial-exec TLS; the GOT slot is always 8 bytes.
190 case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
191 assert(MemSize == 8 && "Invalid fixup for operation");
192 FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
// :tprel_lo12: -- local-exec TLS offset, overflow-checked variants.
194 case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{
195 static const unsigned FixupsBySize[] = {
196 AArch64::fixup_a64_ldst8_tprel_lo12,
197 AArch64::fixup_a64_ldst16_tprel_lo12,
198 AArch64::fixup_a64_ldst32_tprel_lo12,
199 AArch64::fixup_a64_ldst64_tprel_lo12
201 assert(MemSize <= 8 && "Invalid fixup for operation");
202 FixupKind = FixupsBySize[Log2_32(MemSize)];
// :tprel_lo12_nc: -- local-exec TLS, no overflow check.
205 case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
206 static const unsigned FixupsBySize[] = {
207 AArch64::fixup_a64_ldst8_tprel_lo12_nc,
208 AArch64::fixup_a64_ldst16_tprel_lo12_nc,
209 AArch64::fixup_a64_ldst32_tprel_lo12_nc,
210 AArch64::fixup_a64_ldst64_tprel_lo12_nc
212 assert(MemSize <= 8 && "Invalid fixup for operation");
213 FixupKind = FixupsBySize[Log2_32(MemSize)];
// :tlsdesc_lo12: -- TLS descriptor load, always a 64-bit access.
216 case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
217 assert(MemSize == 8 && "Invalid fixup for operation");
218 FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
222 return getAddressWithFixup(ImmOp, FixupKind, Fixups);
// Encode the 12-bit immediate of an ADD/SUB. Plain immediates pass through;
// symbolic operands map their modifier to the corresponding ADD :lo12: /
// TLS fixup and leave the field zero for the relocation to fill.
226 AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
227 SmallVectorImpl<MCFixup> &Fixups) const {
228 const MCOperand &MO = MI.getOperand(OpIdx);
230 return static_cast<unsigned>(MO.getImm());
// Symbolic operand: pick the fixup matching the AArch64MCExpr modifier.
234 unsigned FixupKind = 0;
235 switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
236 default: llvm_unreachable("Invalid expression modifier");
237 case AArch64MCExpr::VK_AARCH64_LO12:
238 FixupKind = AArch64::fixup_a64_add_lo12; break;
// hi12/lo12 pairs for local-dynamic (DTPREL) and local-exec (TPREL) TLS;
// the _nc variants skip the overflow check.
239 case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
240 FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
241 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
242 FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
243 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
244 FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
245 case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
246 FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
247 case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
248 FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
249 case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
250 FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
// TLS-descriptor ADD in the tlsdesc calling sequence.
251 case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
252 FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
255 return getAddressWithFixup(MO, FixupKind, Fixups);
// Encode the page-label operand of ADRP: immediates pass through, symbolic
// operands get a page-relative fixup chosen by the expression's modifier.
259 AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
260 SmallVectorImpl<MCFixup> &Fixups) const {
262 const MCOperand &MO = MI.getOperand(OpIdx);
264 return static_cast<unsigned>(MO.getImm());
// A bare MCExpr (no AArch64MCExpr wrapper) means no modifier: VK_None.
268 unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
269 if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
270 Modifier = Expr->getKind();
// NOTE(review): the "switch (Modifier) {" opener is on an elided line;
// the case labels below dispatch on Modifier.
272 unsigned FixupKind = 0;
274 case AArch64MCExpr::VK_AARCH64_None:
275 FixupKind = AArch64::fixup_a64_adr_prel_page;
277 case AArch64MCExpr::VK_AARCH64_GOT:
278 FixupKind = AArch64::fixup_a64_adr_prel_got_page;
280 case AArch64MCExpr::VK_AARCH64_GOTTPREL:
281 FixupKind = AArch64::fixup_a64_adr_gottprel_page;
283 case AArch64MCExpr::VK_AARCH64_TLSDESC:
284 FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
287 llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
290 return getAddressWithFixup(MO, FixupKind, Fixups);
// Encode a 32-bit "LSL #imm" alias as its underlying UBFM fields:
// low 6 bits = (32 - imm) mod 32 (immr), bits 6+ = 31 - imm (imms).
294 AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
295 SmallVectorImpl<MCFixup> &Fixups) const {
297 const MCOperand &MO = MI.getOperand(OpIdx);
298 assert(MO.isImm() && "Only immediate expected for shift");
300 return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
// 64-bit counterpart of getBitfield32LSLOpValue:
// low 6 bits = (64 - imm) mod 64 (immr), bits 6+ = 63 - imm (imms).
304 AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
305 SmallVectorImpl<MCFixup> &Fixups) const {
307 const MCOperand &MO = MI.getOperand(OpIdx);
308 assert(MO.isImm() && "Only immediate expected for shift");
310 return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
// Generic label encoder: attaches the template-selected fixup kind to the
// operand at OpIdx (NOTE(review): the OpIdx parameter line is elided here).
314 template<AArch64::Fixups fixupDesired> unsigned
315 AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
317 SmallVectorImpl<MCFixup> &Fixups) const {
318 const MCOperand &MO = MI.getOperand(OpIdx);
321 return getAddressWithFixup(MO, fixupDesired, Fixups);
// Encode the pc-relative literal label of a load-literal instruction.
// Only the :gottprel: modifier is legal here; anything else must be a
// plain (unmodified) symbol reference.
328 AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
330 SmallVectorImpl<MCFixup> &Fixups) const {
331 const MCOperand &MO = MI.getOperand(OpIdx);
// A wrapped expression must carry the GOTTPREL (initial-exec TLS) modifier.
339 if (isa<AArch64MCExpr>(MO.getExpr())) {
340 assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
341 == AArch64MCExpr::VK_AARCH64_GOTTPREL
342 && "Invalid symbol modifier for literal load");
343 FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
// Unmodified symbol: plain 19-bit pc-relative load fixup.
345 FixupKind = AArch64::fixup_a64_ld_prel;
348 return getAddressWithFixup(MO, FixupKind, Fixups);
// Default operand encoder: registers map to their hardware encoding via
// MCRegisterInfo, immediates pass through; anything else is a bug (operands
// needing fixups are handled by the dedicated encoders above).
353 AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
355 SmallVectorImpl<MCFixup> &Fixups) const {
357 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
358 } else if (MO.isImm()) {
359 return static_cast<unsigned>(MO.getImm());
362 llvm_unreachable("Unable to encode MCOperand!");
// Encode the imm16 + shift operand pair of MOVZ/MOVN/MOVK. The shift (hw
// field) goes in bits 16+; the low 16 bits hold either the literal value
// or zero plus a :movw_*: fixup chosen by the expression's modifier.
367 AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
368 SmallVectorImpl<MCFixup> &Fixups) const {
// Operand layout: OpIdx = imm16, OpIdx + 1 = shift amount.
369 const MCOperand &UImm16MO = MI.getOperand(OpIdx);
370 const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);
372 unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;
374 if (UImm16MO.isImm()) {
375 Result |= UImm16MO.getImm();
// Symbolic imm16: map the modifier to the matching MOVW fixup.
379 const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
380 AArch64::Fixups requestedFixup;
381 switch (A64E->getKind()) {
382 default: llvm_unreachable("unexpected expression modifier");
// :abs_gN[_nc]: -- unsigned absolute address halfwords 0..3.
383 case AArch64MCExpr::VK_AARCH64_ABS_G0:
384 requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
385 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
386 requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
387 case AArch64MCExpr::VK_AARCH64_ABS_G1:
388 requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
389 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
390 requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
391 case AArch64MCExpr::VK_AARCH64_ABS_G2:
392 requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
393 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
394 requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
395 case AArch64MCExpr::VK_AARCH64_ABS_G3:
396 requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
// :abs_gN: signed variants -- may relax MOVZ to MOVN (see fixMOVZ).
397 case AArch64MCExpr::VK_AARCH64_SABS_G0:
398 requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
399 case AArch64MCExpr::VK_AARCH64_SABS_G1:
400 requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
401 case AArch64MCExpr::VK_AARCH64_SABS_G2:
402 requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
// :dtprel_gN[_nc]: -- local-dynamic TLS offset halfwords.
403 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
404 requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
405 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
406 requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
407 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
408 requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
409 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
410 requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
411 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
412 requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
// :gottprel_gN: -- initial-exec TLS GOT-entry halfwords.
413 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
414 requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
415 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
416 requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
// :tprel_gN[_nc]: -- local-exec TLS offset halfwords.
417 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
418 requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
419 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
420 requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
421 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
422 requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
423 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
424 requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
425 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
426 requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
// Shift bits stay; imm16 field comes back zero with the fixup attached.
429 return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
// Post-encoder for load/store-exclusive: unused register fields must read
// as all-ones. 0x001F0000 sets Rs (bits 16-20); 0x00007C00 sets Rt2
// (bits 10-14).
432 template<int hasRs, int hasRt2> unsigned
433 AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
434 unsigned EncodedValue) const {
435 if (!hasRs) EncodedValue |= 0x001F0000;
436 if (!hasRt2) EncodedValue |= 0x00007C00;
// Post-encoder for MOVZ: see the comment below for why bit 30 must be
// cleared when a signed MOVW fixup is attached.
442 AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
443 // If one of the signed fixup kinds is applied to a MOVZ instruction, the
444 // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
445 // job to ensure that any bits possibly affected by this are 0. This means we
446 // must zero out bit 30 (essentially emitting a MOVN).
// Operand 1 is the imm16; operand 0 is the destination register.
447 MCOperand UImm16MO = MI.getOperand(1);
449 // Nothing to do if there's no fixup.
450 if (UImm16MO.isImm())
// Signed / TLS-offset modifiers can resolve to a negative value, so the
// relocation may rewrite this MOVZ into a MOVN: clear bit 30 now.
453 const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
454 switch (A64E->getKind()) {
455 case AArch64MCExpr::VK_AARCH64_SABS_G0:
456 case AArch64MCExpr::VK_AARCH64_SABS_G1:
457 case AArch64MCExpr::VK_AARCH64_SABS_G2:
458 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
459 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
460 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
461 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
462 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
463 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
464 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
465 return EncodedValue & ~(1u << 30);
467 // Nothing to do for an unsigned fixup.
471 llvm_unreachable("Should have returned by now");
// Post-encoder for SMULH/UMULH: force the ignored Ra field (bits 10-14)
// to 0b11111 as the canonical encoding.
475 AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
476 unsigned EncodedValue) const {
477 // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
478 // (i.e. all bits 1) but is ignored by the processor.
479 EncodedValue |= 0x1f << 10;
// Factory registered with the target registry; only the MCContext is used
// by the emitter (MCII/MRI/STI are part of the common factory signature).
// NOTE(review): the Ctx parameter is declared on an elided line.
483 MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
484 const MCRegisterInfo &MRI,
485 const MCSubtargetInfo &STI,
487 return new AArch64MCCodeEmitter(Ctx);
// Top-level encode: handle the pseudo TLSDESCCALL marker specially (fixup
// only, zero bytes emitted), otherwise emit the TableGen'erated encoding.
490 void AArch64MCCodeEmitter::
491 EncodeInstruction(const MCInst &MI, raw_ostream &OS,
492 SmallVectorImpl<MCFixup> &Fixups) const {
493 if (MI.getOpcode() == AArch64::TLSDESCCALL) {
494 // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
495 // following (BLR) instruction. It doesn't emit any code itself so it
496 // doesn't go through the normal TableGenerated channels.
497 MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
// Wrap the callee symbol in a TLSDesc expression for the relocation.
499 Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
500 Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
// Normal path: TableGen computes the word; write it out little-endian.
504 uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
506 EmitInstruction(Binary, OS);
510 #include "AArch64GenMCCodeEmitter.inc"