//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class X86MCCodeEmitter : public MCCodeEmitter {
  X86MCCodeEmitter(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  const MCInstrInfo &MCII;
  const MCSubtargetInfo &STI;
  MCContext &Ctx;
public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
                   MCContext &ctx)
    : MCII(mcii), STI(sti), Ctx(ctx) {
  }

  ~X86MCCodeEmitter() {}
  bool is64BitMode() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
  }

  bool is32BitMode() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
  }

  bool is16BitMode() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
  }
  unsigned GetX86RegNum(const MCOperand &MO) const {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
  }
  // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
  // 0-7 and the difference between the 2 groups is given by the REX prefix.
  // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
  // in 1's complement form, for example:
  //
  //  ModRM field => XMM9 => 1
  //  VEX.VVVV    => XMM9 => ~9
  //
  // See table 4-35 of Intel AVX Programming Reference for details.
  unsigned char getVEXRegisterEncoding(const MCInst &MI,
                                       unsigned OpNum) const {
    unsigned SrcReg = MI.getOperand(OpNum).getReg();
    unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
    if (X86II::isX86_64ExtendedReg(SrcReg))
      SrcRegNum |= 8;

    // The registers represented through VEX_VVVV should
    // be encoded in 1's complement form.
    return (~SrcRegNum) & 0xf;
  }
  unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
                                             unsigned OpNum) const {
    assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
           "Invalid mask register as write-mask!");
    unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
    return MaskRegNum;
  }
  void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
    OS << (char)C;
    ++CurByte;
  }
  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                    raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != Size; ++i) {
      EmitByte(Val & 255, CurByte, OS);
      Val >>= 8;
    }
  }
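  // For example (illustrative), EmitConstant(0x12345678, 4, ...) emits the
  // bytes 78 56 34 12 in that order.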
  void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
                     unsigned ImmSize, MCFixupKind FixupKind,
                     unsigned &CurByte, raw_ostream &OS,
                     SmallVectorImpl<MCFixup> &Fixups,
                     int ImmOffset = 0) const;
  inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
                                        unsigned RM) {
    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
    return RM | (RegOpcode << 3) | (Mod << 6);
  }
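  // Worked example (illustrative): ModRMByte(3, 2, 1) packs mod=11b,
  // reg/opcode=010b and r/m=001b into 11010001b == 0xD1.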
  void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        unsigned &CurByte, raw_ostream &OS) const {
    EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
  }
  void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   unsigned &CurByte, raw_ostream &OS) const {
    // SIB byte is in the same format as the ModRMByte.
    EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
  }
  void EmitMemModRMByte(const MCInst &MI, unsigned Op,
                        unsigned RegOpcodeField,
                        uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
                        SmallVectorImpl<MCFixup> &Fixups) const;

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;

  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                           const MCInst &MI, const MCInstrDesc &Desc,
                           raw_ostream &OS) const;

  void EmitSegmentOverridePrefix(uint64_t TSFlags, unsigned &CurByte,
                                 int MemOperand, const MCInst &MI,
                                 raw_ostream &OS) const;

  void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                        const MCInst &MI, const MCInstrDesc &Desc,
                        raw_ostream &OS) const;
};
} // end anonymous namespace

MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            const MCRegisterInfo &MRI,
                                            const MCSubtargetInfo &STI,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, STI, Ctx);
}

/// isDisp8 - Return true if this signed displacement fits in an 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  return Value == (signed char)Value;
}
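
// For example (illustrative): isDisp8(-128) and isDisp8(127) hold, but
// isDisp8(128) fails, forcing the 32-bit displacement form.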

/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
/// compressed displacement field.
static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
  assert(((TSFlags >> X86II::VEXShift) & X86II::EVEX) &&
         "Compressed 8-bit displacement is only valid for EVEX inst.");

  unsigned CD8E = (TSFlags >> X86II::EVEX_CD8EShift) & X86II::EVEX_CD8EMask;
  unsigned CD8V = (TSFlags >> X86II::EVEX_CD8VShift) & X86II::EVEX_CD8VMask;

  if (CD8V == 0 && CD8E == 0) {
    CValue = Value;
    return isDisp8(Value);
  }

  unsigned MemObjSize = 1U << CD8E;
  if (CD8V & 4) {
    // Fixed vector length
    MemObjSize *= 1U << (CD8V & 0x3);
  } else {
    // Modified vector length
    bool EVEX_b = (TSFlags >> X86II::VEXShift) & X86II::EVEX_B;
    if (!EVEX_b) {
      unsigned EVEX_LL = ((TSFlags >> X86II::VEXShift) & X86II::VEX_L) ? 1 : 0;
      EVEX_LL += ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2) ? 2 : 0;
      assert(EVEX_LL < 3 && "Invalid vector length!");

      unsigned NumElems = (1U << (EVEX_LL + 4)) / MemObjSize;
      NumElems /= 1U << (CD8V & 0x3);

      MemObjSize *= NumElems;
    }
  }

  unsigned MemObjMask = MemObjSize - 1;
  assert((MemObjSize & MemObjMask) == 0 && "Invalid memory object size.");

  if (Value & MemObjMask) // Unaligned offset
    return false;

  Value /= (int)MemObjSize;
  bool Ret = (Value == (signed char)Value);

  if (Ret)
    CValue = Value;
  return Ret;
}
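
// Worked example (illustrative): for a full 512-bit EVEX memory access the
// memory object size is 64 bytes, so a displacement of 128 compresses to
// CValue = 2, while a displacement of 4 is unaligned and needs disp32.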

/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  return MCFixup::getKindForSize(Size, isPCRel);
}

/// Is32BitMemOperand - Return true if the specified instruction has
/// a 32-bit memory operand. Op specifies the operand # of the memoperand.
static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}

/// Is64BitMemOperand - Return true if the specified instruction has
/// a 64-bit memory operand. Op specifies the operand # of the memoperand.
#ifndef NDEBUG
static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}
#endif

/// Is16BitMemOperand - Return true if the specified instruction has
/// a 16-bit memory operand. Op specifies the operand # of the memoperand.
static bool Is16BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}

/// StartsWithGlobalOffsetTable - Check if this expression starts with
/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple cases that
/// are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
/// of a binary expression.
enum GlobalOffsetTableExprKind {
  GOT_None,
  GOT_Normal,
  GOT_SymDiff
};
static GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = 0;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}

static bool HasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}

void X86MCCodeEmitter::
EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
              MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
              SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
  const MCExpr *Expr = NULL;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 &&
        FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
      return;
    }
    Expr = MCConstantExpr::Create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // If we have an immoffset, add it to the expression.
  if ((FixupKind == FK_Data_4 ||
       FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      if (Kind == GOT_Normal)
        ImmOffset = CurByte;
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (HasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr);
      if (HasSecRelSymbolRef(Bin->getLHS())
          || HasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
    ImmOffset -= 4;
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and 4 zeros.
  Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind, Loc));
  EmitConstant(0, Size, CurByte, OS);
}
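
// Note (illustrative): for FK_PCRel_4 above, the CPU resolves the target
// relative to the end of the 4-byte field, while the fixup is recorded at the
// field's start; the -4 bias reconciles the two reference points.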

void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
                                        unsigned RegOpcodeField,
                                        uint64_t TSFlags, unsigned &CurByte,
                                        raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups) const{
  const MCOperand &Disp     = MI.getOperand(Op+X86::AddrDisp);
  const MCOperand &Base     = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &Scale    = MI.getOperand(Op+X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();
  bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP) {    // [disp32+RIP] in X86-64 mode
    assert(is64BitMode() && "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
    EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);

    unsigned FixupKind = X86::reloc_riprel_4byte;

    // movq loads are handled with a special relocation form which allows the
    // linker to eliminate some loads for GOT references which end up in the
    // same linkage unit.
    if (MI.getOpcode() == X86::MOV64rm)
      FixupKind = X86::reloc_riprel_4byte_movq_load;

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the immediate field of the instruction with
    // the size of the immediate field. If we have this case, add it into the
    // expression to emit.
    int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;

    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
                  CurByte, OS, Fixups, -ImmSize);
    return;
  }
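
  // Worked example (illustrative): "movq foo(%rip), %rax" has no trailing
  // immediate, so ImmSize is 0 and the fixup needs no extra bias; a form with
  // a trailing 4-byte immediate passes ImmSize = 4 so the displacement is
  // still computed from the true end of the instruction.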

  unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (Is16BitMemOperand(MI, Op)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And GetX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo, to the row values from
      // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
      static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isDisp8(Disp.getImm())) {
        if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
          // There is no displacement; just the register.
          EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be
        // encoded without a displacement.
        EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
    } else {
      // There is no BaseReg; this is the plain [disp16] case.
      EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
    return;
  }
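
  // Worked example (illustrative): for [BP+SI], R16Table gives RMfield = 6
  // (BP) and IndexReg16 = 4 (SI); since IndexReg16 & 2 is clear, RMfield
  // becomes (4 & 1) | ((7 - 6) << 1) == 2, row 2 (BP+SI) of SDM Table 2-1.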

  // Determine whether a SIB byte is needed.
  // If no BaseReg, issue a RIP relative instruction only if the MCE can
  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
  // 2-7) and absolute references.

  if (// The SIB byte must be used if there is an index register.
      IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // required.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
      (!is64BitMode() || BaseReg != 0)) {

    if (BaseReg == 0) {          // [disp32]     in X86-32 mode
      EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
      EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
      return;
    }

    // If the base is not EBP/ESP and there is no displacement, use simple
    // indirect register encoding, this handles addresses like [EAX]. The
    // encoding for [EBP] with no displacement means [disp32] so we handle it
    // by emitting a displacement of 0 below.
    if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
      EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
      return;
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
    if (Disp.isImm()) {
      if (!HasEVEX && isDisp8(Disp.getImm())) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // Try EVEX compressed 8-bit displacement first; if failed, fall back to
      // 32-bit displacement.
      int CDisp8 = 0;
      if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                      CDisp8 - Disp.getImm());
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
    EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
    return;
  }

  // We need a SIB byte, so start by outputting the ModR/M byte first
  assert(IndexReg.getReg() != X86::ESP &&
         IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8  = false;
  int CDisp8 = 0;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (!Disp.isImm()) {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (Disp.getImm() == 0 &&
             // Base reg can't be anything that ends up with '5' as the base
             // reg, it is the magic [*] nomenclature that indicates no base.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
  } else if (!HasEVEX && isDisp8(Disp.getImm())) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
  } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
    ImmOffset = CDisp8 - Disp.getImm();
  } else {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
  unsigned SS = SSTable[Scale.getImm()];

  if (BaseReg == 0) {
    // Handle the SIB byte for the case where there is no base, see Intel
    // Manual 2A, table 2-7. The displacement has already been output.
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
      IndexRegNo = 4;
    EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
  } else {
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else
      IndexRegNo = 4;   // For example [ESP+1*<noreg>+4]
    EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
  }

  // Do we need to output a displacement?
  if (ForceDisp8)
    EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                  ImmOffset);
  else if (ForceDisp32 || Disp.getImm() != 0)
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
}
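
// Worked example (illustrative): [EBX + 4*ECX + 100000] takes the final else
// branch above (mod=10b, r/m=100b); SSTable[4] gives SS=2, so the SIB byte is
// ModRMByte(2, 1, 3) == 0x8B (scale=4, index=ECX, base=EBX), followed by the
// 32-bit displacement.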

/// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
/// called VEX.
void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                           int MemOperand, const MCInst &MI,
                                           const MCInstrDesc &Desc,
                                           raw_ostream &OS) const {
  bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
  bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
  bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
  bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
  bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
  bool HasEVEX_RC = false;

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64 bit mode only)
  //
  unsigned char VEX_R = 0x1;
  unsigned char EVEX_R2 = 0x1;

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)
  unsigned char VEX_X = 0x1;

  // VEX_B:
  //
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)
  //
  unsigned char VEX_B = 0x1;

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)
  unsigned char VEX_W = 0;

  // XOP: Use XOP prefix byte 0x8f instead of VEX.
  bool XOP = false;

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100-0b11111: Reserved for future use
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword
  unsigned char VEX_5M = 0x1;

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.
  unsigned char VEX_4V = 0xf;
  unsigned char EVEX_V2 = 0x1;

  // VEX_L (Vector Length):
  //
  //  0: scalar or 128-bit vector
  //  1: 256-bit vector
  //
  unsigned char VEX_L = 0;
  unsigned char EVEX_L2 = 0;

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2
  //
  unsigned char VEX_PP = 0;

  // EVEX_U
  unsigned char EVEX_U = 1; // Always '1' so far

  // EVEX_z
  unsigned char EVEX_z = 0;

  // EVEX_b
  unsigned char EVEX_b = 0;

  // EVEX_rc
  unsigned char EVEX_rc = 0;

  // EVEX_aaa
  unsigned char EVEX_aaa = 0;

  // Encode the operand size opcode prefix as needed.
  if (TSFlags & X86II::OpSize)
    VEX_PP = 0x01;

  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
    VEX_W = 1;

  if ((TSFlags >> X86II::VEXShift) & X86II::XOP)
    XOP = true;

  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
    VEX_L = 1;
  if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2))
    EVEX_L2 = 1;

  if (HasEVEX_K && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_Z))
    EVEX_z = 1;

  if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_B))
    EVEX_b = 1;

  switch (TSFlags & X86II::Op0Mask) {
  default: llvm_unreachable("Invalid prefix!");
  case X86II::T8:   // 0F 38
    VEX_5M = 0x2;
    break;
  case X86II::TA:   // 0F 3A
    VEX_5M = 0x3;
    break;
  case X86II::T8XS: // F3 0F 38
    VEX_PP = 0x2;
    VEX_5M = 0x2;
    break;
  case X86II::T8XD: // F2 0F 38
    VEX_PP = 0x3;
    VEX_5M = 0x2;
    break;
  case X86II::TAXD: // F2 0F 3A
    VEX_PP = 0x3;
    VEX_5M = 0x3;
    break;
  case X86II::XS:   // F3 0F
    VEX_PP = 0x2;
    break;
  case X86II::XD:   // F2 0F
    VEX_PP = 0x3;
    break;
  case X86II::XOP8:
    VEX_5M = 0x8;
    break;
  case X86II::XOP9:
    VEX_5M = 0x9;
    break;
  case X86II::XOPA:
    VEX_5M = 0xA;
    break;
  case X86II::TB:   // VEX_5M/VEX_PP already correct
    break;
  }

  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
  unsigned NumOps = Desc.getNumOperands();
  unsigned RcOperand = NumOps-1;
  unsigned CurOp = 0;
  if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0)
    ++CurOp;
  else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
           Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1)
    // Special case for AVX-512 GATHER with 2 TIED_TO operands
    // Skip the first 2 operands: dst, mask_wb
    CurOp += 2;
  else if (NumOps > 3 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
           Desc.getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1)
    // Special case for GATHER with 2 TIED_TO operands
    // Skip the first 2 operands: dst, mask_wb
    CurOp += 2;
  else if (NumOps > 2 && Desc.getOperandConstraint(NumOps - 2, MCOI::TIED_TO) == 0)
    // SCATTER
    ++CurOp;

  switch (TSFlags & X86II::FormMask) {
  case X86II::MRMDestMem: {
    // MRMDestMem instructions forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
                                                 X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
                                                 X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;
    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand +
                                          X86::AddrIndexReg).getReg()))
      EVEX_V2 = 0x0;

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    const MCOperand &MO = MI.getOperand(CurOp);
    if (MO.isReg()) {
      if (X86II::isX86_64ExtendedReg(MO.getReg()))
        VEX_R = 0x0;
      if (HasEVEX && X86II::is32ExtendedReg(MO.getReg()))
        EVEX_R2 = 0x0;
    }
    break;
  }
  case X86II::MRMSrcMem:
    // MRMSrcMem instructions forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(VEX_I8IMM)
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
    //  dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_R = 0x0;
    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      EVEX_R2 = 0x0;
    CurOp++;

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;
    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand +
                                          X86::AddrIndexReg).getReg()))
      EVEX_V2 = 0x0;

    if (HasVEX_4VOp3)
      // Instruction format for 4VOp3:
      //   src1(ModR/M), MemAddr, src3(VEX_4V)
      // CurOp points to start of the MemoryOperand,
      //   it skips TIED_TO operands if they exist, then increments past src1.
      // CurOp + X86::AddrNumOperands will point to src3.
      VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
    break;
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {
    // MRM[0-9]m instructions forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;
    break;
  }
  case X86II::MRMSrcReg:
    // MRMSrcReg instructions forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
    //  dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_R = 0x0;
    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      EVEX_R2 = 0x0;
    CurOp++;

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    if (HasMemOp4) // Skip second register source (encoded in I8IMM)
      CurOp++;

    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_B = 0x0;
    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_X = 0x0;
    CurOp++;
    if (HasVEX_4VOp3)
      VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
    if (EVEX_b) {
      assert(RcOperand >= CurOp);
      EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
      HasEVEX_RC = true;
    }
    break;
  case X86II::MRMDestReg:
    // MRMDestReg instructions forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_B = 0x0;
    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_X = 0x0;
    CurOp++;

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_R = 0x0;
    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      EVEX_R2 = 0x0;
    break;
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    // MRM0r-MRM7r instructions forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }
    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_B = 0x0;
    if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_X = 0x0;
    break;
  default: // RawFrm
    break;
  }

  // Emit segment override opcode prefix as needed.
  EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);

  if (!HasEVEX) {
    // VEX opcode prefix can have 2 or 3 bytes
    //
    //  3 bytes:
    //    +-----+ +--------------+ +-------------------+
    //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
    //  2 bytes:
    //    +-----+ +-------------------+
    //    | C5h | | R | vvvv | L | pp |
    //    +-----+ +-------------------+
    //
    unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);

    if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
      EmitByte(0xC5, CurByte, OS);
      EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
      return;
    }

    // 3 byte VEX prefix
    EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
    EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
    EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
  } else {
    // EVEX opcode prefix can have 4 bytes
    //
    // +-----+ +--------------+ +-------------------+ +------------------------+
    // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
    // +-----+ +--------------+ +-------------------+ +------------------------+
    assert((VEX_5M & 0x3) == VEX_5M
           && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");

    VEX_5M &= 0x3;

    EmitByte(0x62, CurByte, OS);
    EmitByte((VEX_R   << 7) |
             (VEX_X   << 6) |
             (VEX_B   << 5) |
             (EVEX_R2 << 4) |
             VEX_5M, CurByte, OS);
    EmitByte((VEX_W   << 7) |
             (VEX_4V  << 3) |
             (EVEX_U  << 2) |
             VEX_PP, CurByte, OS);
    if (HasEVEX_RC)
      EmitByte((EVEX_z  << 7) |
               (EVEX_rc << 5) |
               (EVEX_b  << 4) |
               (EVEX_V2 << 3) |
               EVEX_aaa, CurByte, OS);
    else
      EmitByte((EVEX_z  << 7) |
               (EVEX_L2 << 6) |
               (VEX_L   << 5) |
               (EVEX_b  << 4) |
               (EVEX_V2 << 3) |
               EVEX_aaa, CurByte, OS);
  }
}
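
// Worked example (illustrative): "vaddps %ymm1, %ymm2, %ymm3" qualifies for
// the 2-byte form (VEX_B and VEX_X still 1, VEX_W clear, 0F map): after 0xC5,
// R=1, vvvv = ~2 & 0xf = 1101b, L=1 and pp=00b pack to 0xEC, so the prefix is
// "C5 EC" followed by opcode 0x58.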

/// DetermineREXPrefix - Determine if the MCInst has to be encoded with an
/// X86-64 REX prefix which specifies 1) 64-bit instructions, 2) non-default
/// operand size, and 3) use of X86-64 extended registers.
static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                                   const MCInstrDesc &Desc) {
  unsigned REX = 0;
  if (TSFlags & X86II::REX_W)
    REX |= 1 << 3; // set REX.W

  if (MI.getNumOperands() == 0) return REX;

  unsigned NumOps = MI.getNumOperands();
  // FIXME: MCInst should explicitize the two-addrness.
  bool isTwoAddr = NumOps > 1 &&
                   Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;

  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
  unsigned i = isTwoAddr ? 1 : 0;
  for (; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (!X86II::isX86_64NonExtLowByteReg(Reg)) continue;
    // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
    // that returns non-zero.
    REX |= 0x40; // REX fixed encoding prefix
    break;
  }

  switch (TSFlags & X86II::FormMask) {
  case X86II::MRMSrcReg:
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 2; // set REX.R
    i = isTwoAddr ? 2 : 1;
    for (; i != NumOps; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
        REX |= 1 << 0; // set REX.B
    }
    break;
  case X86II::MRMSrcMem: {
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 2; // set REX.R
    unsigned Bit = 0;
    i = isTwoAddr ? 2 : 1;
    for (; i != NumOps; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        if (X86II::isX86_64ExtendedReg(MO.getReg()))
          REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
        Bit++;
      }
    }
    break;
  }
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
  case X86II::MRMDestMem: {
    unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
    i = isTwoAddr ? 1 : 0;
    if (NumOps > e && MI.getOperand(e).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
      REX |= 1 << 2; // set REX.R
    unsigned Bit = 0;
    for (; i != e; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        if (X86II::isX86_64ExtendedReg(MO.getReg()))
          REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
        Bit++;
      }
    }
    break;
  }
  default:
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 0; // set REX.B
    i = isTwoAddr ? 2 : 1;
    for (unsigned e = NumOps; i != e; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
        REX |= 1 << 2; // set REX.R
    }
    break;
  }
  return REX;
}
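
// Worked example (illustrative): for "addq %rax, %r8" the 64-bit operation
// sets REX.W and the extended destination %r8 (in ModRM.r/m) sets REX.B, so
// the caller emits 0x40 | (1 << 3) | (1 << 0) = 0x49.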

/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
                                                 unsigned &CurByte,
                                                 int MemOperand,
                                                 const MCInst &MI,
                                                 raw_ostream &OS) const {
  if (MemOperand < 0)
    return; // No memory operand

  // Check for explicit segment override on memory operand.
  switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
  default: llvm_unreachable("Unknown segment register!");
  case 0: break;
  case X86::CS: EmitByte(0x2E, CurByte, OS); break;
  case X86::SS: EmitByte(0x36, CurByte, OS); break;
  case X86::DS: EmitByte(0x3E, CurByte, OS); break;
  case X86::ES: EmitByte(0x26, CurByte, OS); break;
  case X86::FS: EmitByte(0x64, CurByte, OS); break;
  case X86::GS: EmitByte(0x65, CurByte, OS); break;
  }
}
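
// Illustrative example: "movl %gs:16, %eax" carries X86::GS in its segment
// operand, so a 0x65 override byte is emitted ahead of the remaining prefixes.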

/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present.
/// If not present, it is -1.
void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                        int MemOperand, const MCInst &MI,
                                        const MCInstrDesc &Desc,
                                        raw_ostream &OS) const {

  // Emit the lock opcode prefix as needed.
  if (TSFlags & X86II::LOCK)
    EmitByte(0xF0, CurByte, OS);

  // Emit segment override opcode prefix as needed.
  EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);

  // Emit the repeat opcode prefix as needed.
  if ((TSFlags & X86II::Op0Mask) == X86II::REP)
    EmitByte(0xF3, CurByte, OS);

  // Emit the address size opcode prefix as needed.
  bool need_address_override;
  if (TSFlags & X86II::AdSize) {
    need_address_override = true;
  } else if (MemOperand == -1) {
    need_address_override = false;
  } else if (is64BitMode()) {
    assert(!Is16BitMemOperand(MI, MemOperand));
    need_address_override = Is32BitMemOperand(MI, MemOperand);
  } else if (is32BitMode()) {
    assert(!Is64BitMemOperand(MI, MemOperand));
    need_address_override = Is16BitMemOperand(MI, MemOperand);
  } else {
    assert(is16BitMode());
    assert(!Is64BitMemOperand(MI, MemOperand));
    need_address_override = !Is16BitMemOperand(MI, MemOperand);
  }

  if (need_address_override)
    EmitByte(0x67, CurByte, OS);
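
  // Illustrative example: in 64-bit mode an access through a 32-bit base such
  // as "movl (%eax), %ecx" makes Is32BitMemOperand() true, so the 0x67
  // address-size override is emitted; the same access through %rax needs none.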

  // Emit the operand size opcode prefix as needed.
  if (TSFlags & (is16BitMode() ? X86II::OpSize16 : X86II::OpSize))
    EmitByte(0x66, CurByte, OS);

  bool Need0FPrefix = false;
  switch (TSFlags & X86II::Op0Mask) {
  default: llvm_unreachable("Invalid prefix!");
  case 0: break;  // No prefix!
  case X86II::REP: break; // already handled.
  case X86II::TB:  // Two-byte opcode prefix
  case X86II::T8:  // 0F 38
  case X86II::TA:  // 0F 3A
  case X86II::A6:  // 0F A6
  case X86II::A7:  // 0F A7
    Need0FPrefix = true;
    break;
  case X86II::XS:   // F3 0F
  case X86II::T8XS: // F3 0F 38
    EmitByte(0xF3, CurByte, OS);
    Need0FPrefix = true;
    break;
  case X86II::XD:   // F2 0F
  case X86II::T8XD: // F2 0F 38
  case X86II::TAXD: // F2 0F 3A
    EmitByte(0xF2, CurByte, OS);
    Need0FPrefix = true;
    break;
  case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
  case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
    EmitByte(0xD8+(((TSFlags & X86II::Op0Mask) - X86II::D8) >> X86II::Op0Shift),
             CurByte, OS);
    break;
  }

  // Handle REX prefix.
  // FIXME: Can this come before F2 etc to simplify emission?
  if (is64BitMode()) {
    if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
      EmitByte(0x40 | REX, CurByte, OS);
  }

  // 0x0F escape code must be emitted just before the opcode.
  if (Need0FPrefix)
    EmitByte(0x0F, CurByte, OS);

  // FIXME: Pull this up into previous switch if REX can be moved earlier.
  switch (TSFlags & X86II::Op0Mask) {
  case X86II::T8XS:  // F3 0F 38
  case X86II::T8XD:  // F2 0F 38
  case X86II::T8:    // 0F 38
    EmitByte(0x38, CurByte, OS);
    break;
  case X86II::TAXD:  // F2 0F 3A
  case X86II::TA:    // 0F 3A
    EmitByte(0x3A, CurByte, OS);
    break;
  case X86II::A6:    // 0F A6
    EmitByte(0xA6, CurByte, OS);
    break;
  case X86II::A7:    // 0F A7
    EmitByte(0xA7, CurByte, OS);
    break;
  }
}

void X86MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
    return;

  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  // Keep track of the current byte being emitted.
  unsigned CurByte = 0;

  // Is this instruction encoded using the AVX VEX prefix?
  bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX;

  // It uses the VEX.VVVV field?
  bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
  bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
  bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
  const unsigned MemOp4_I8IMMOperand = 2;

  // It uses the EVEX.aaa field?
  bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
  bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
  bool HasEVEX_B = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_B);

  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
  if (MemoryOperand != -1) MemoryOperand += CurOp;

  if (!HasVEXPrefix)
    EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
  else
    EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);

  unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
    BaseOpcode = 0x0F;   // Weird 3DNow! encoding.

  unsigned SrcRegNum = 0;
  switch (TSFlags & X86II::FormMask) {
  default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
    llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
  case X86II::RawFrm:
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  case X86II::RawFrmImm8:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
                  OS, Fixups);
    break;
  case X86II::RawFrmImm16:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
                  OS, Fixups);
    break;

  case X86II::AddRegFrm:
    EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
    break;
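
  // Illustrative note: AddRegFrm above folds the register number into the
  // opcode byte itself, e.g. "pushq %rbx" is 0x50 + 3 = 0x53, with no ModRM.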

  case X86II::MRMDestReg:
    EmitByte(BaseOpcode, CurByte, OS);
    SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(CurOp),
                     GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;

  case X86II::MRMDestMem:
    EmitByte(BaseOpcode, CurByte, OS);
    SrcRegNum = CurOp + X86::AddrNumOperands;

    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitMemModRMByte(MI, CurOp,
                     GetX86RegNum(MI.getOperand(SrcRegNum)),
                     TSFlags, CurByte, OS, Fixups);
    CurOp = SrcRegNum + 1;
    break;

  case X86II::MRMSrcReg:
    EmitByte(BaseOpcode, CurByte, OS);
    SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);

    // 2 operands skipped with HasMemOp4, compensate accordingly
    CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
    if (HasVEX_4VOp3)
      ++CurOp;
    // do not count the rounding control operand
    if (HasEVEX_B)
      NumOps--;
    break;

  case X86II::MRMSrcMem: {
    int AddrOperands = X86::AddrNumOperands;
    unsigned FirstMemOp = CurOp+1;

    if (HasEVEX_K) { // Skip writemask
      ++AddrOperands;
      ++FirstMemOp;
    }

    if (HasVEX_4V) {
      ++AddrOperands;
      ++FirstMemOp;  // Skip the register source (which is encoded in VEX_VVVV).
    }
    if (HasMemOp4) // Skip second register source (encoded in I8IMM)
      ++FirstMemOp;

    EmitByte(BaseOpcode, CurByte, OS);

    EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, CurByte, OS, Fixups);
    CurOp += AddrOperands + 1;
    if (HasVEX_4VOp3)
      ++CurOp;
    break;
  }

  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
    EmitRegModRMByte(MI.getOperand(CurOp++),
                     (TSFlags & X86II::FormMask)-X86II::MRM0r,
                     CurByte, OS);
    break;
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
    EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
                     TSFlags, CurByte, OS, Fixups);
    CurOp += X86::AddrNumOperands;
    break;
  case X86II::MRM_C1: case X86II::MRM_C2: case X86II::MRM_C3:
  case X86II::MRM_C4: case X86II::MRM_C8: case X86II::MRM_C9:
  case X86II::MRM_CA: case X86II::MRM_CB: case X86II::MRM_D0:
  case X86II::MRM_D1: case X86II::MRM_D4: case X86II::MRM_D5:
  case X86II::MRM_D6: case X86II::MRM_D8: case X86II::MRM_D9:
  case X86II::MRM_DA: case X86II::MRM_DB: case X86II::MRM_DC:
  case X86II::MRM_DD: case X86II::MRM_DE: case X86II::MRM_DF:
  case X86II::MRM_E8: case X86II::MRM_F0: case X86II::MRM_F8:
  case X86II::MRM_F9:
    EmitByte(BaseOpcode, CurByte, OS);

    unsigned char MRM;
    switch (TSFlags & X86II::FormMask) {
    default: llvm_unreachable("Invalid Form");
    case X86II::MRM_C1: MRM = 0xC1; break;
    case X86II::MRM_C2: MRM = 0xC2; break;
    case X86II::MRM_C3: MRM = 0xC3; break;
    case X86II::MRM_C4: MRM = 0xC4; break;
    case X86II::MRM_C8: MRM = 0xC8; break;
    case X86II::MRM_C9: MRM = 0xC9; break;
    case X86II::MRM_CA: MRM = 0xCA; break;
    case X86II::MRM_CB: MRM = 0xCB; break;
    case X86II::MRM_D0: MRM = 0xD0; break;
    case X86II::MRM_D1: MRM = 0xD1; break;
    case X86II::MRM_D4: MRM = 0xD4; break;
    case X86II::MRM_D5: MRM = 0xD5; break;
    case X86II::MRM_D6: MRM = 0xD6; break;
    case X86II::MRM_D8: MRM = 0xD8; break;
    case X86II::MRM_D9: MRM = 0xD9; break;
    case X86II::MRM_DA: MRM = 0xDA; break;
    case X86II::MRM_DB: MRM = 0xDB; break;
    case X86II::MRM_DC: MRM = 0xDC; break;
    case X86II::MRM_DD: MRM = 0xDD; break;
    case X86II::MRM_DE: MRM = 0xDE; break;
    case X86II::MRM_DF: MRM = 0xDF; break;
    case X86II::MRM_E8: MRM = 0xE8; break;
    case X86II::MRM_F0: MRM = 0xF0; break;
    case X86II::MRM_F8: MRM = 0xF8; break;
    case X86II::MRM_F9: MRM = 0xF9; break;
    }
    EmitByte(MRM, CurByte, OS);
    break;
  }

  // If there is a remaining operand, it must be a trailing immediate. Emit it
  // according to the right size for the instruction. Some instructions
  // (SSE4a extrq and insertq) have two trailing immediates.
  while (CurOp != NumOps && NumOps - CurOp <= 2) {
    // The last source register of a 4 operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte.
    if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
      const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
                                                    : CurOp);
      ++CurOp;
      unsigned RegNum = GetX86RegNum(MO) << 4;
      if (X86II::isX86_64ExtendedReg(MO.getReg()))
        RegNum |= 1 << 7;
      // If there is an additional 5th operand it must be an immediate, which
      // is encoded in bits[3:0]
      if (CurOp != NumOps) {
        const MCOperand &MIMM = MI.getOperand(CurOp++);
        if (MIMM.isImm()) {
          unsigned Val = MIMM.getImm();
          assert(Val < 16 && "Immediate operand value out of range");
          RegNum |= Val;
        }
      }
      EmitImmediate(MCOperand::CreateImm(RegNum), MI.getLoc(), 1, FK_Data_1,
                    CurByte, OS, Fixups);
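      // Illustrative example: for a VEX_I8IMM instruction such as vblendvps,
      // an extra register source like %xmm4 is emitted here as immediate
      // byte 0x40 (register number 4 placed in bits[7:4]).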
    } else {
      unsigned FixupKind;
      // FIXME: Is there a better way to know that we need a signed relocation?
      if (MI.getOpcode() == X86::ADD64ri32 ||
          MI.getOpcode() == X86::MOV64ri32 ||
          MI.getOpcode() == X86::MOV64mi32 ||
          MI.getOpcode() == X86::PUSH64i32)
        FixupKind = X86::reloc_signed_4byte;
      else
        FixupKind = getImmFixupKind(TSFlags);
      EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), MCFixupKind(FixupKind),
                    CurByte, OS, Fixups);
    }
  }

  if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
    EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);

#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}