//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
31 class AArch64MCCodeEmitter : public MCCodeEmitter {
32 AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
33 void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
37 AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}
39 ~AArch64MCCodeEmitter() {}
41 unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
42 SmallVectorImpl<MCFixup> &Fixups) const;
44 unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
45 SmallVectorImpl<MCFixup> &Fixups) const;
48 unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
49 SmallVectorImpl<MCFixup> &Fixups) const {
50 return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
53 unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
54 SmallVectorImpl<MCFixup> &Fixups,
57 unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
58 SmallVectorImpl<MCFixup> &Fixups) const;
59 unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
60 SmallVectorImpl<MCFixup> &Fixups) const;
62 unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
63 SmallVectorImpl<MCFixup> &Fixups) const;
64 unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
65 SmallVectorImpl<MCFixup> &Fixups) const;
66 unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
67 SmallVectorImpl<MCFixup> &Fixups) const;
68 unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
69 SmallVectorImpl<MCFixup> &Fixups) const;
71 unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
72 SmallVectorImpl<MCFixup> &Fixups) const;
73 unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
74 SmallVectorImpl<MCFixup> &Fixups) const;
75 unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
76 SmallVectorImpl<MCFixup> &Fixups) const;
77 unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
78 SmallVectorImpl<MCFixup> &Fixups) const;
80 // Labels are handled mostly the same way: a symbol is needed, and
81 // just gets some fixup attached.
82 template<AArch64::Fixups fixupDesired>
83 unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
84 SmallVectorImpl<MCFixup> &Fixups) const;
86 unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
87 SmallVectorImpl<MCFixup> &Fixups) const;
90 unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
91 SmallVectorImpl<MCFixup> &Fixups) const;
94 unsigned getAddressWithFixup(const MCOperand &MO,
96 SmallVectorImpl<MCFixup> &Fixups) const;
99 // getBinaryCodeForInstr - TableGen'erated function for getting the
100 // binary encoding for an instruction.
101 uint64_t getBinaryCodeForInstr(const MCInst &MI,
102 SmallVectorImpl<MCFixup> &Fixups) const;
104 /// getMachineOpValue - Return binary encoding of operand. If the machine
105 /// operand requires relocation, record the relocation and return zero.
106 unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO,
107 SmallVectorImpl<MCFixup> &Fixups) const;
110 void EmitByte(unsigned char C, raw_ostream &OS) const {
114 void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
115 // Output the constant in little endian byte order.
116 for (unsigned i = 0; i != 4; ++i) {
117 EmitByte(Val & 0xff, OS);
123 void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
124 SmallVectorImpl<MCFixup> &Fixups,
125 const MCSubtargetInfo &STI) const;
127 template<int hasRs, int hasRt2> unsigned
128 fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;
130 unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;
132 unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
137 } // end anonymous namespace
139 unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
141 SmallVectorImpl<MCFixup> &Fixups) const {
143 // This can occur for manually decoded or constructed MCInsts, but neither
144 // the assembly-parser nor instruction selection will currently produce an
145 // MCInst that's not a symbol reference.
146 assert(MO.isImm() && "Unexpected address requested");
150 const MCExpr *Expr = MO.getExpr();
151 MCFixupKind Kind = MCFixupKind(FixupKind);
152 Fixups.push_back(MCFixup::Create(0, Expr, Kind));
157 unsigned AArch64MCCodeEmitter::
158 getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
159 SmallVectorImpl<MCFixup> &Fixups,
161 const MCOperand &ImmOp = MI.getOperand(OpIdx);
163 return ImmOp.getImm();
165 assert(ImmOp.isExpr() && "Unexpected operand type");
166 const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
170 switch (Expr->getKind()) {
171 default: llvm_unreachable("Unexpected operand modifier");
172 case AArch64MCExpr::VK_AARCH64_LO12: {
173 static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
174 AArch64::fixup_a64_ldst16_lo12,
175 AArch64::fixup_a64_ldst32_lo12,
176 AArch64::fixup_a64_ldst64_lo12,
177 AArch64::fixup_a64_ldst128_lo12 };
178 assert(MemSize <= 16 && "Invalid fixup for operation");
179 FixupKind = FixupsBySize[Log2_32(MemSize)];
182 case AArch64MCExpr::VK_AARCH64_GOT_LO12:
183 assert(MemSize == 8 && "Invalid fixup for operation");
184 FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
186 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
187 static const unsigned FixupsBySize[] = {
188 AArch64::fixup_a64_ldst8_dtprel_lo12,
189 AArch64::fixup_a64_ldst16_dtprel_lo12,
190 AArch64::fixup_a64_ldst32_dtprel_lo12,
191 AArch64::fixup_a64_ldst64_dtprel_lo12
193 assert(MemSize <= 8 && "Invalid fixup for operation");
194 FixupKind = FixupsBySize[Log2_32(MemSize)];
197 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
198 static const unsigned FixupsBySize[] = {
199 AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
200 AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
201 AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
202 AArch64::fixup_a64_ldst64_dtprel_lo12_nc
204 assert(MemSize <= 8 && "Invalid fixup for operation");
205 FixupKind = FixupsBySize[Log2_32(MemSize)];
208 case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
209 assert(MemSize == 8 && "Invalid fixup for operation");
210 FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
212 case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{
213 static const unsigned FixupsBySize[] = {
214 AArch64::fixup_a64_ldst8_tprel_lo12,
215 AArch64::fixup_a64_ldst16_tprel_lo12,
216 AArch64::fixup_a64_ldst32_tprel_lo12,
217 AArch64::fixup_a64_ldst64_tprel_lo12
219 assert(MemSize <= 8 && "Invalid fixup for operation");
220 FixupKind = FixupsBySize[Log2_32(MemSize)];
223 case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
224 static const unsigned FixupsBySize[] = {
225 AArch64::fixup_a64_ldst8_tprel_lo12_nc,
226 AArch64::fixup_a64_ldst16_tprel_lo12_nc,
227 AArch64::fixup_a64_ldst32_tprel_lo12_nc,
228 AArch64::fixup_a64_ldst64_tprel_lo12_nc
230 assert(MemSize <= 8 && "Invalid fixup for operation");
231 FixupKind = FixupsBySize[Log2_32(MemSize)];
234 case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
235 assert(MemSize == 8 && "Invalid fixup for operation");
236 FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
240 return getAddressWithFixup(ImmOp, FixupKind, Fixups);
244 AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
245 SmallVectorImpl<MCFixup> &Fixups) const {
246 const MCOperand &MO = MI.getOperand(OpIdx);
248 return static_cast<unsigned>(MO.getImm());
252 unsigned FixupKind = 0;
253 switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
254 default: llvm_unreachable("Invalid expression modifier");
255 case AArch64MCExpr::VK_AARCH64_LO12:
256 FixupKind = AArch64::fixup_a64_add_lo12; break;
257 case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
258 FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
259 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
260 FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
261 case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
262 FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
263 case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
264 FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
265 case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
266 FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
267 case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
268 FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
269 case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
270 FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
273 return getAddressWithFixup(MO, FixupKind, Fixups);
277 AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
278 SmallVectorImpl<MCFixup> &Fixups) const {
280 const MCOperand &MO = MI.getOperand(OpIdx);
282 return static_cast<unsigned>(MO.getImm());
286 unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
287 if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
288 Modifier = Expr->getKind();
290 unsigned FixupKind = 0;
292 case AArch64MCExpr::VK_AARCH64_None:
293 FixupKind = AArch64::fixup_a64_adr_prel_page;
295 case AArch64MCExpr::VK_AARCH64_GOT:
296 FixupKind = AArch64::fixup_a64_adr_prel_got_page;
298 case AArch64MCExpr::VK_AARCH64_GOTTPREL:
299 FixupKind = AArch64::fixup_a64_adr_gottprel_page;
301 case AArch64MCExpr::VK_AARCH64_TLSDESC:
302 FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
305 llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
308 return getAddressWithFixup(MO, FixupKind, Fixups);
312 AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
313 SmallVectorImpl<MCFixup> &Fixups) const {
315 const MCOperand &MO = MI.getOperand(OpIdx);
316 assert(MO.isImm() && "Only immediate expected for shift");
318 return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
322 AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
323 SmallVectorImpl<MCFixup> &Fixups) const {
325 const MCOperand &MO = MI.getOperand(OpIdx);
326 assert(MO.isImm() && "Only immediate expected for shift");
328 return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
331 unsigned AArch64MCCodeEmitter::getShiftRightImm8(
332 const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
333 return 8 - MI.getOperand(Op).getImm();
336 unsigned AArch64MCCodeEmitter::getShiftRightImm16(
337 const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
338 return 16 - MI.getOperand(Op).getImm();
341 unsigned AArch64MCCodeEmitter::getShiftRightImm32(
342 const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
343 return 32 - MI.getOperand(Op).getImm();
346 unsigned AArch64MCCodeEmitter::getShiftRightImm64(
347 const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
348 return 64 - MI.getOperand(Op).getImm();
351 unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
352 const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
353 return MI.getOperand(Op).getImm() - 8;
356 unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
357 const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
358 return MI.getOperand(Op).getImm() - 16;
361 unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
362 const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
363 return MI.getOperand(Op).getImm() - 32;
366 unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
367 const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
368 return MI.getOperand(Op).getImm() - 64;
371 template<AArch64::Fixups fixupDesired> unsigned
372 AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
374 SmallVectorImpl<MCFixup> &Fixups) const {
375 const MCOperand &MO = MI.getOperand(OpIdx);
378 return getAddressWithFixup(MO, fixupDesired, Fixups);
385 AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
387 SmallVectorImpl<MCFixup> &Fixups) const {
388 const MCOperand &MO = MI.getOperand(OpIdx);
396 if (isa<AArch64MCExpr>(MO.getExpr())) {
397 assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
398 == AArch64MCExpr::VK_AARCH64_GOTTPREL
399 && "Invalid symbol modifier for literal load");
400 FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
402 FixupKind = AArch64::fixup_a64_ld_prel;
405 return getAddressWithFixup(MO, FixupKind, Fixups);
410 AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
412 SmallVectorImpl<MCFixup> &Fixups) const {
414 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
415 } else if (MO.isImm()) {
416 return static_cast<unsigned>(MO.getImm());
419 llvm_unreachable("Unable to encode MCOperand!");
424 AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
425 SmallVectorImpl<MCFixup> &Fixups) const {
426 const MCOperand &UImm16MO = MI.getOperand(OpIdx);
427 const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);
429 unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;
431 if (UImm16MO.isImm()) {
432 Result |= UImm16MO.getImm();
436 const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
437 AArch64::Fixups requestedFixup;
438 switch (A64E->getKind()) {
439 default: llvm_unreachable("unexpected expression modifier");
440 case AArch64MCExpr::VK_AARCH64_ABS_G0:
441 requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
442 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
443 requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
444 case AArch64MCExpr::VK_AARCH64_ABS_G1:
445 requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
446 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
447 requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
448 case AArch64MCExpr::VK_AARCH64_ABS_G2:
449 requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
450 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
451 requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
452 case AArch64MCExpr::VK_AARCH64_ABS_G3:
453 requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
454 case AArch64MCExpr::VK_AARCH64_SABS_G0:
455 requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
456 case AArch64MCExpr::VK_AARCH64_SABS_G1:
457 requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
458 case AArch64MCExpr::VK_AARCH64_SABS_G2:
459 requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
460 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
461 requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
462 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
463 requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
464 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
465 requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
466 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
467 requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
468 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
469 requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
470 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
471 requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
472 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
473 requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
474 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
475 requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
476 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
477 requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
478 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
479 requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
480 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
481 requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
482 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
483 requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
486 return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
489 template<int hasRs, int hasRt2> unsigned
490 AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
491 unsigned EncodedValue) const {
492 if (!hasRs) EncodedValue |= 0x001F0000;
493 if (!hasRt2) EncodedValue |= 0x00007C00;
499 AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
500 // If one of the signed fixup kinds is applied to a MOVZ instruction, the
501 // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
502 // job to ensure that any bits possibly affected by this are 0. This means we
503 // must zero out bit 30 (essentially emitting a MOVN).
504 MCOperand UImm16MO = MI.getOperand(1);
506 // Nothing to do if there's no fixup.
507 if (UImm16MO.isImm())
510 const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
511 switch (A64E->getKind()) {
512 case AArch64MCExpr::VK_AARCH64_SABS_G0:
513 case AArch64MCExpr::VK_AARCH64_SABS_G1:
514 case AArch64MCExpr::VK_AARCH64_SABS_G2:
515 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
516 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
517 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
518 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
519 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
520 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
521 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
522 return EncodedValue & ~(1u << 30);
524 // Nothing to do for an unsigned fixup.
528 llvm_unreachable("Should have returned by now");
532 AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
533 unsigned EncodedValue) const {
534 // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
535 // (i.e. all bits 1) but is ignored by the processor.
536 EncodedValue |= 0x1f << 10;
540 MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
541 const MCRegisterInfo &MRI,
542 const MCSubtargetInfo &STI,
544 return new AArch64MCCodeEmitter(Ctx);
547 void AArch64MCCodeEmitter::
548 EncodeInstruction(const MCInst &MI, raw_ostream &OS,
549 SmallVectorImpl<MCFixup> &Fixups,
550 const MCSubtargetInfo &STI) const {
551 if (MI.getOpcode() == AArch64::TLSDESCCALL) {
552 // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
553 // following (BLR) instruction. It doesn't emit any code itself so it
554 // doesn't go through the normal TableGenerated channels.
555 MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
557 Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
558 Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
562 uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
564 EmitInstruction(Binary, OS);
568 #include "AArch64GenMCCodeEmitter.inc"