1 //===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the X86MCCodeEmitter class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "mccodeemitter"
15 #include "MCTargetDesc/X86MCTargetDesc.h"
16 #include "MCTargetDesc/X86BaseInfo.h"
17 #include "MCTargetDesc/X86FixupKinds.h"
18 #include "llvm/MC/MCCodeEmitter.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCInstrInfo.h"
23 #include "llvm/MC/MCRegisterInfo.h"
24 #include "llvm/MC/MCSubtargetInfo.h"
25 #include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class X86MCCodeEmitter : public MCCodeEmitter {
  X86MCCodeEmitter(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  const MCInstrInfo &MCII;
  MCContext &Ctx;
public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
    : MCII(mcii), Ctx(ctx) {
  }

  ~X86MCCodeEmitter() {}
43 bool is64BitMode(const MCSubtargetInfo &STI) const {
44 return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
47 bool is32BitMode(const MCSubtargetInfo &STI) const {
48 return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
51 bool is16BitMode(const MCSubtargetInfo &STI) const {
52 return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
55 /// Is16BitMemOperand - Return true if the specified instruction has
56 /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
57 bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
58 const MCSubtargetInfo &STI) const {
59 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
60 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
61 const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
    if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
        Disp.isImm() && Disp.getImm() < 0x10000)
      return true;
    if ((BaseReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
        (IndexReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
      return true;
    return false;
  }
74 unsigned GetX86RegNum(const MCOperand &MO) const {
75 return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
78 // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
79 // 0-7 and the difference between the 2 groups is given by the REX prefix.
  // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
81 // in 1's complement form, example:
83 // ModRM field => XMM9 => 1
84 // VEX.VVVV => XMM9 => ~9
86 // See table 4-35 of Intel AVX Programming Reference for details.
87 unsigned char getVEXRegisterEncoding(const MCInst &MI,
88 unsigned OpNum) const {
89 unsigned SrcReg = MI.getOperand(OpNum).getReg();
90 unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
    if (X86II::isX86_64ExtendedReg(SrcReg))
      SrcRegNum |= 8;

    // The registers represented through VEX_VVVV should
    // be encoded in 1's complement form.
    return (~SrcRegNum) & 0xf;
  }
  unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
                                             unsigned OpNum) const {
    assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
           "Invalid mask register as write-mask!");
    unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
    return MaskRegNum;
  }
  void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
    OS << (char)C;
    ++CurByte;
  }
  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                    raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != Size; ++i) {
      EmitByte(Val & 255, CurByte, OS);
      Val >>= 8;
    }
  }
121 void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
122 unsigned ImmSize, MCFixupKind FixupKind,
123 unsigned &CurByte, raw_ostream &OS,
124 SmallVectorImpl<MCFixup> &Fixups,
125 int ImmOffset = 0) const;
  inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
                                        unsigned RM) {
    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
    return RM | (RegOpcode << 3) | (Mod << 6);
  }
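  // Illustrative example (not from the original source): ModRMByte(3, 2, 1)
  // packs mod=0b11, reg/opcode=0b010, r/m=0b001 into 0b11'010'001 == 0xD1,
  // i.e. a register-direct form using opcode extension /2 with ECX/RCX as r/m.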
133 void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
134 unsigned &CurByte, raw_ostream &OS) const {
135 EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
138 void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
139 unsigned &CurByte, raw_ostream &OS) const {
140 // SIB byte is in the same format as the ModRMByte.
141 EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
145 void EmitMemModRMByte(const MCInst &MI, unsigned Op,
146 unsigned RegOpcodeField,
147 uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
148 SmallVectorImpl<MCFixup> &Fixups,
149 const MCSubtargetInfo &STI) const;
151 void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
152 SmallVectorImpl<MCFixup> &Fixups,
153 const MCSubtargetInfo &STI) const;
155 void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
156 const MCInst &MI, const MCInstrDesc &Desc,
157 raw_ostream &OS) const;
159 void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
160 const MCInst &MI, raw_ostream &OS) const;
162 void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
163 const MCInst &MI, const MCInstrDesc &Desc,
164 const MCSubtargetInfo &STI,
165 raw_ostream &OS) const;
168 } // end anonymous namespace
171 MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
172 const MCRegisterInfo &MRI,
173 const MCSubtargetInfo &STI,
175 return new X86MCCodeEmitter(MCII, Ctx);
/// isDisp8 - Return true if this signed displacement fits in an 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  return Value == (signed char)Value;
}
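// Quick sanity examples (illustrative): isDisp8(127) and isDisp8(-128) are
// true, while isDisp8(128) is false, so 128 needs the 32-bit displacement form.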
/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
/// compressed displacement field.
186 static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
187 assert(((TSFlags >> X86II::VEXShift) & X86II::EVEX) &&
188 "Compressed 8-bit displacement is only valid for EVEX inst.");
190 unsigned CD8E = (TSFlags >> X86II::EVEX_CD8EShift) & X86II::EVEX_CD8EMask;
191 unsigned CD8V = (TSFlags >> X86II::EVEX_CD8VShift) & X86II::EVEX_CD8VMask;
193 if (CD8V == 0 && CD8E == 0) {
195 return isDisp8(Value);
198 unsigned MemObjSize = 1U << CD8E;
200 // Fixed vector length
201 MemObjSize *= 1U << (CD8V & 0x3);
203 // Modified vector length
204 bool EVEX_b = (TSFlags >> X86II::VEXShift) & X86II::EVEX_B;
206 unsigned EVEX_LL = ((TSFlags >> X86II::VEXShift) & X86II::VEX_L) ? 1 : 0;
207 EVEX_LL += ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2) ? 2 : 0;
  assert(EVEX_LL < 3 && "Invalid vector length for EVEX compressed disp8!");
210 unsigned NumElems = (1U << (EVEX_LL + 4)) / MemObjSize;
211 NumElems /= 1U << (CD8V & 0x3);
213 MemObjSize *= NumElems;
  unsigned MemObjMask = MemObjSize - 1;
  assert((MemObjSize & MemObjMask) == 0 && "Invalid memory object size.");

  if (Value & MemObjMask) // Unaligned offset
    return false;
  Value /= (int)MemObjSize;
  bool Ret = (Value == (signed char)Value);
  if (Ret)
    CValue = Value;
  return Ret;
}
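// Illustrative example (not from the original source): for a 64-byte memory
// object (e.g. a full 512-bit load), a displacement of 128 is a multiple of 64
// and compresses to CValue = 128/64 = 2, while a displacement of 96 is not a
// multiple of 64 and must fall back to the 32-bit displacement encoding.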
230 /// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
231 /// in an instruction with the specified TSFlags.
232 static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
233 unsigned Size = X86II::getSizeOfImm(TSFlags);
234 bool isPCRel = X86II::isImmPCRel(TSFlags);
  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default: llvm_unreachable("Unsupported signed fixup size!");
    case 4: return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}
245 /// Is32BitMemOperand - Return true if the specified instruction has
246 /// a 32-bit memory operand. Op specifies the operand # of the memoperand.
247 static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
248 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
249 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}
259 /// Is64BitMemOperand - Return true if the specified instruction has
260 /// a 64-bit memory operand. Op specifies the operand # of the memoperand.
262 static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
263 const MCOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
264 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}
275 /// StartsWithGlobalOffsetTable - Check if this expression starts with
276 /// _GLOBAL_OFFSET_TABLE_ and if it is of the form
277 /// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple cases that
/// are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
280 /// of a binary expression.
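// Illustrative classification (not from the original source): a bare
// _GLOBAL_OFFSET_TABLE_ operand yields GOT_Normal, an expression such as
// _GLOBAL_OFFSET_TABLE_-.Lsym (a symbol on the right-hand side) yields
// GOT_SymDiff, and anything else yields GOT_None.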
enum GlobalOffsetTableExprKind {
  GOT_None,
  GOT_Normal,
  GOT_SymDiff
};
286 static GlobalOffsetTableExprKind
287 StartsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = 0;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }
  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;
  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}
307 static bool HasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}
315 void X86MCCodeEmitter::
316 EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
317 MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
318 SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
319 const MCExpr *Expr = NULL;
320 if (DispOp.isImm()) {
321 // If this is a simple integer displacement that doesn't require a
322 // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 &&
        FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
      return;
    }
    Expr = MCConstantExpr::Create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }
334 // If we have an immoffset, add it to the expression.
335 if ((FixupKind == FK_Data_4 ||
336 FixupKind == FK_Data_8 ||
337 FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
338 GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
339 if (Kind != GOT_None) {
340 assert(ImmOffset == 0);
342 FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      if (Kind == GOT_Normal)
        ImmOffset = CurByte;
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
346 if (HasSecRelSymbolRef(Expr)) {
347 FixupKind = MCFixupKind(FK_SecRel_4);
349 } else if (Expr->getKind() == MCExpr::Binary) {
350 const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr);
351 if (HasSecRelSymbolRef(Bin->getLHS())
352 || HasSecRelSymbolRef(Bin->getRHS())) {
353 FixupKind = MCFixupKind(FK_SecRel_4);
358 // If the fixup is pc-relative, we need to bias the value to be relative to
359 // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
    ImmOffset -= 4;
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
                                   Ctx);
373 // Emit a symbolic constant as a fixup and 4 zeros.
374 Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind, Loc));
375 EmitConstant(0, Size, CurByte, OS);
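  // Worked example (illustrative, not from the original source): a call with a
  // rel32 operand places this 4-byte fixup right after the opcode byte. The CPU
  // resolves the target relative to the end of the field, while the fixup is
  // attached at its start; the -4 bias applied above accounts for exactly that
  // difference.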
378 void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
379 unsigned RegOpcodeField,
380 uint64_t TSFlags, unsigned &CurByte,
382 SmallVectorImpl<MCFixup> &Fixups,
383 const MCSubtargetInfo &STI) const{
384 const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
385 const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
386 const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
387 const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
388 unsigned BaseReg = Base.getReg();
389 bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
391 // Handle %rip relative addressing.
392 if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
393 assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
394 assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
395 EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
397 unsigned FixupKind = X86::reloc_riprel_4byte;
399 // movq loads are handled with a special relocation form which allows the
400 // linker to eliminate some loads for GOT references which end up in the
401 // same linkage unit.
402 if (MI.getOpcode() == X86::MOV64rm)
403 FixupKind = X86::reloc_riprel_4byte_movq_load;
405 // rip-relative addressing is actually relative to the *next* instruction.
406 // Since an immediate can follow the mod/rm byte for an instruction, this
407 // means that we need to bias the immediate field of the instruction with
408 // the size of the immediate field. If we have this case, add it into the
409 // expression to emit.
410 int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
                  CurByte, OS, Fixups, -ImmSize);
    return;
  }
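  // Illustrative example (not from the original source): for an instruction
  // such as "cmpl $imm32, foo(%rip)" a 4-byte immediate follows the
  // displacement, so -ImmSize above adds an extra -4 on top of the usual
  // rip-relative bias; the CPU measures foo from the end of the whole
  // instruction, not from the end of the displacement field.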
417 unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
419 // 16-bit addressing forms of the ModR/M byte have a different encoding for
420 // the R/M field and are far more limited in which registers can be used.
421 if (Is16BitMemOperand(MI, Op, STI)) {
423 // For 32-bit addressing, the row and column values in Table 2-2 are
424 // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
425 // some special cases. And GetX86RegNum reflects that numbering.
426 // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
427 // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
428 // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
429 // while values 0-3 indicate the allowed combinations (base+index) of
430 // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
432 // R16Table[] is a lookup from the normal RegNo, to the row values from
433 // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
434 static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
435 unsigned RMfield = R16Table[BaseRegNo];
437 assert(RMfield && "invalid 16-bit base register");
439 if (IndexReg.getReg()) {
440 unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];
442 assert(IndexReg16 && "invalid 16-bit index register");
443 // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
444 assert(((IndexReg16 ^ RMfield) & 2) &&
445 "invalid 16-bit base/index register combination");
446 assert(Scale.getImm() == 1 &&
447 "invalid scale for 16-bit memory reference");
449 // Allow base/index to appear in either order (although GAS doesn't).
      if (IndexReg16 & 2)
        RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
      else
        RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
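      // Illustrative examples (not from the original source): base BX (row 7)
      // with index SI (row 4) folds to R/M = 0, i.e. [BX+SI]; base BP (row 6)
      // with index DI (row 5) folds to R/M = 3, i.e. [BP+DI].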
      if (Disp.isImm() && isDisp8(Disp.getImm())) {
        if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
          // There is no displacement; just the register.
          EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
        EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
467 // This is the [REG]+disp16 case.
468 EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
470 // There is no BaseReg; this is the plain [disp16] case.
471 EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
474 // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
475 EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
479 // Determine whether a SIB byte is needed.
480 // If no BaseReg, issue a RIP relative instruction only if the MCE can
481 // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
482 // 2-7) and absolute references.
484 if (// The SIB byte must be used if there is an index register.
485 IndexReg.getReg() == 0 &&
486 // The SIB byte must be used if the base is ESP/RSP/R12, all of which
487 // encode to an R/M value of 4, which indicates that a SIB byte is
489 BaseRegNo != N86::ESP &&
490 // If there is no base register and we're in 64-bit mode, we need a SIB
491 // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
492 (!is64BitMode(STI) || BaseReg != 0)) {
494 if (BaseReg == 0) { // [disp32] in X86-32 mode
495 EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
496 EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
500 // If the base is not EBP/ESP and there is no displacement, use simple
501 // indirect register encoding, this handles addresses like [EAX]. The
502 // encoding for [EBP] with no displacement means [disp32] so we handle it
503 // by emitting a displacement of 0 below.
504 if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
505 EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
509 // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
511 if (!HasEVEX && isDisp8(Disp.getImm())) {
512 EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
513 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
    // Try EVEX compressed 8-bit displacement first; if failed, fall back to
    // 32-bit displacement.
    int CDisp8 = 0;
    if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
520 EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
521 EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
522 CDisp8 - Disp.getImm());
527 // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
528 EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
    return;
  }
534 // We need a SIB byte, so start by outputting the ModR/M byte first
535 assert(IndexReg.getReg() != X86::ESP &&
536 IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
538 bool ForceDisp32 = false;
  bool ForceDisp8  = false;
  int CDisp8 = 0;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (!Disp.isImm()) {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (Disp.getImm() == 0 &&
552 // Base reg can't be anything that ends up with '5' as the base
553 // reg, it is the magic [*] nomenclature that indicates no base.
554 BaseRegNo != N86::EBP) {
555 // Emit no displacement ModR/M byte
556 EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
557 } else if (!HasEVEX && isDisp8(Disp.getImm())) {
558 // Emit the disp8 encoding.
559 EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
560 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
561 } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
562 // Emit the disp8 encoding.
563 EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
564 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
565 ImmOffset = CDisp8 - Disp.getImm();
  } else {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  }
571 // Calculate what the SS field value should be...
572 static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
573 unsigned SS = SSTable[Scale.getImm()];
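  // Illustrative example (not from the original source): [EBX + ECX*4] uses a
  // scale of 4, so SS = SSTable[4] = 2, index = ECX (1), base = EBX (3), and
  // the SIB byte emitted below comes out as 0b10'001'011 == 0x8B.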
  if (BaseReg == 0) {
    // Handle the SIB byte for the case where there is no base, see Intel
    // Manual 2A, table 2-7. The displacement has already been output.
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
      IndexRegNo = 4;
    EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
  } else {
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else
      IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
    EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
  }
  // Do we need to output a displacement?
  if (ForceDisp8)
    EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups, ImmOffset);
596 else if (ForceDisp32 || Disp.getImm() != 0)
597 EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
598 CurByte, OS, Fixups);
/// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
/// called VEX.
603 void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
604 int MemOperand, const MCInst &MI,
605 const MCInstrDesc &Desc,
606 raw_ostream &OS) const {
607 bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
608 bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
609 bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
610 bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
611 bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
612 bool HasEVEX_RC = (TSFlags >> X86II::VEXShift) & X86II::EVEX_RC;
  // VEX_R: opcode extension equivalent to REX.R in
615 // 1's complement (inverted) form
617 // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
618 // 0: Same as REX_R=1 (64 bit mode only)
620 unsigned char VEX_R = 0x1;
621 unsigned char EVEX_R2 = 0x1;
623 // VEX_X: equivalent to REX.X, only used when a
624 // register is used for index in SIB Byte.
626 // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
627 // 0: Same as REX.X=1 (64-bit mode only)
628 unsigned char VEX_X = 0x1;
  // VEX_B:
  //
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)
  //
  unsigned char VEX_B = 0x1;
637 // VEX_W: opcode specific (use like REX.W, or used for
638 // opcode extension, or ignored, depending on the opcode byte)
639 unsigned char VEX_W = 0;
641 // XOP: Use XOP prefix byte 0x8f instead of VEX.
642 bool XOP = (TSFlags >> X86II::VEXShift) & X86II::XOP;
644 // VEX_5M (VEX m-mmmmm field):
646 // 0b00000: Reserved for future use
647 // 0b00001: implied 0F leading opcode
648 // 0b00010: implied 0F 38 leading opcode bytes
649 // 0b00011: implied 0F 3A leading opcode bytes
650 // 0b00100-0b11111: Reserved for future use
651 // 0b01000: XOP map select - 08h instructions with imm byte
652 // 0b01001: XOP map select - 09h instructions with no imm byte
653 // 0b01010: XOP map select - 0Ah instructions with imm dword
654 unsigned char VEX_5M = 0;
656 // VEX_4V (VEX vvvv field): a register specifier
657 // (in 1's complement form) or 1111 if unused.
658 unsigned char VEX_4V = 0xf;
659 unsigned char EVEX_V2 = 0x1;
  // VEX_L (Vector Length):
  //
  //  0: scalar or 128-bit vector
  //  1: 256-bit vector
  //
  unsigned char VEX_L = 0;
  unsigned char EVEX_L2 = 0;
667 unsigned char EVEX_L2 = 0;
669 // VEX_PP: opcode extension providing equivalent
670 // functionality of a SIMD prefix
677 unsigned char VEX_PP = 0;
680 unsigned char EVEX_U = 1; // Always '1' so far
683 unsigned char EVEX_z = 0;
686 unsigned char EVEX_b = 0;
689 unsigned char EVEX_rc = 0;
692 unsigned char EVEX_aaa = 0;
694 bool EncodeRC = false;
  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
    VEX_W = 1;
  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
    VEX_L = 1;
  if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2))
    EVEX_L2 = 1;
  if (HasEVEX_K && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_Z))
    EVEX_z = 1;
  if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_B))
    EVEX_b = 1;
710 switch (TSFlags & X86II::OpPrefixMask) {
711 default: break; // VEX_PP already correct
712 case X86II::PD: VEX_PP = 0x1; break; // 66
713 case X86II::XS: VEX_PP = 0x2; break; // F3
714 case X86II::XD: VEX_PP = 0x3; break; // F2
717 switch (TSFlags & X86II::OpMapMask) {
718 default: llvm_unreachable("Invalid prefix!");
719 case X86II::TB: VEX_5M = 0x1; break; // 0F
720 case X86II::T8: VEX_5M = 0x2; break; // 0F 38
721 case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
722 case X86II::XOP8: VEX_5M = 0x8; break;
723 case X86II::XOP9: VEX_5M = 0x9; break;
724 case X86II::XOPA: VEX_5M = 0xA; break;
727 // Classify VEX_B, VEX_4V, VEX_R, VEX_X
728 unsigned NumOps = Desc.getNumOperands();
729 unsigned CurOp = X86II::getOperandBias(Desc);
731 switch (TSFlags & X86II::FormMask) {
732 default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
735 case X86II::MRMDestMem: {
    // MRMDestMem instruction forms:
737 // MemAddr, src1(ModR/M)
738 // MemAddr, src1(VEX_4V), src2(ModR/M)
739 // MemAddr, src1(ModR/M), imm8
741 if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
742 X86::AddrBaseReg).getReg()))
744 if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
745 X86::AddrIndexReg).getReg()))
747 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand +
748 X86::AddrIndexReg).getReg()))
751 CurOp += X86::AddrNumOperands;
754 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
757 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
758 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
763 const MCOperand &MO = MI.getOperand(CurOp);
765 if (X86II::isX86_64ExtendedReg(MO.getReg()))
767 if (HasEVEX && X86II::is32ExtendedReg(MO.getReg()))
772 case X86II::MRMSrcMem:
    // MRMSrcMem instruction forms:
774 // src1(ModR/M), MemAddr
775 // src1(ModR/M), src2(VEX_4V), MemAddr
776 // src1(ModR/M), MemAddr, imm8
777 // src1(ModR/M), MemAddr, src2(VEX_I8IMM)
780 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
781 // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
782 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
784 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
789 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
792 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
793 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
798 if (X86II::isX86_64ExtendedReg(
799 MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
801 if (X86II::isX86_64ExtendedReg(
802 MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
804 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand +
805 X86::AddrIndexReg).getReg()))
809 // Instruction format for 4VOp3:
810 // src1(ModR/M), MemAddr, src3(VEX_4V)
811 // CurOp points to start of the MemoryOperand,
812 // it skips TIED_TO operands if exist, then increments past src1.
813 // CurOp + X86::AddrNumOperands will point to src3.
814 VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
816 case X86II::MRM0m: case X86II::MRM1m:
817 case X86II::MRM2m: case X86II::MRM3m:
818 case X86II::MRM4m: case X86II::MRM5m:
819 case X86II::MRM6m: case X86II::MRM7m: {
    // MRM[0-9]m instruction forms:
822 // src1(VEX_4V), MemAddr
824 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
825 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
831 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
833 if (X86II::isX86_64ExtendedReg(
834 MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
836 if (X86II::isX86_64ExtendedReg(
837 MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
841 case X86II::MRMSrcReg:
    // MRMSrcReg instruction forms:
843 // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
844 // dst(ModR/M), src1(ModR/M)
845 // dst(ModR/M), src1(ModR/M), imm8
848 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
849 // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
850 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
852 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
857 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
860 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
861 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
866 if (HasMemOp4) // Skip second register source (encoded in I8IMM)
869 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
871 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
875 VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
878 unsigned RcOperand = NumOps-1;
879 assert(RcOperand >= CurOp);
880 EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
885 case X86II::MRMDestReg:
    // MRMDestReg instruction forms:
887 // dst(ModR/M), src(ModR/M)
888 // dst(ModR/M), src(ModR/M), imm8
889 // dst(ModR/M), src1(VEX_4V), src2(ModR/M)
890 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
892 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
897 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
900 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
901 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
906 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
908 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
913 case X86II::MRM0r: case X86II::MRM1r:
914 case X86II::MRM2r: case X86II::MRM3r:
915 case X86II::MRM4r: case X86II::MRM5r:
916 case X86II::MRM6r: case X86II::MRM7r:
    // MRM0r-MRM7r instruction forms:
918 // dst(VEX_4V), src(ModR/M), imm8
920 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
921 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
926 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
928 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
930 if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
936 // VEX opcode prefix can have 2 or 3 bytes
939 // +-----+ +--------------+ +-------------------+
940 // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
941 // +-----+ +--------------+ +-------------------+
943 // +-----+ +-------------------+
944 // | C5h | | R | vvvv | L | pp |
945 // +-----+ +-------------------+
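  // Illustrative example (not from the original source): "vaddps %ymm1, %ymm2,
  // %ymm3" fits the 2-byte form and encodes as C5 EC 58 D9, where 0xEC packs
  // R=1, vvvv=~2=0b1101, L=1 (256-bit) and pp=0b00.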
947 unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
949 if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix
950 EmitByte(0xC5, CurByte, OS);
    EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
    return;
  }
956 EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS);
957 EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
958 EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
960 // EVEX opcode prefix can have 4 bytes
962 // +-----+ +--------------+ +-------------------+ +------------------------+
963 // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
964 // +-----+ +--------------+ +-------------------+ +------------------------+
965 assert((VEX_5M & 0x3) == VEX_5M
966 && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
970 EmitByte(0x62, CurByte, OS);
  EmitByte((VEX_R << 7) | (VEX_X << 6) | (VEX_B << 5) | (EVEX_R2 << 4) |
           VEX_5M, CurByte, OS);
  EmitByte((VEX_W << 7) | (VEX_4V << 3) | (EVEX_U << 2) |
           VEX_PP, CurByte, OS);
  if (EncodeRC)
    EmitByte((EVEX_z << 7) | (EVEX_rc << 5) | (EVEX_b << 4) | (EVEX_V2 << 3) |
             EVEX_aaa, CurByte, OS);
  else
    EmitByte((EVEX_z << 7) | (EVEX_L2 << 6) | (VEX_L << 5) | (EVEX_b << 4) |
             (EVEX_V2 << 3) | EVEX_aaa, CurByte, OS);
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with an X86-64
997 /// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
998 /// size, and 3) use of X86-64 extended registers.
999 static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                                   const MCInstrDesc &Desc) {
  unsigned REX = 0;
  if (TSFlags & X86II::REX_W)
    REX |= 1 << 3; // set REX.W
1005 if (MI.getNumOperands() == 0) return REX;
1007 unsigned NumOps = MI.getNumOperands();
1008 // FIXME: MCInst should explicitize the two-addrness.
1009 bool isTwoAddr = NumOps > 1 &&
1010 Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
1012 // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
1013 unsigned i = isTwoAddr ? 1 : 0;
1014 for (; i != NumOps; ++i) {
1015 const MCOperand &MO = MI.getOperand(i);
1016 if (!MO.isReg()) continue;
1017 unsigned Reg = MO.getReg();
1018 if (!X86II::isX86_64NonExtLowByteReg(Reg)) continue;
1019 // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
1020 // that returns non-zero.
1021 REX |= 0x40; // REX fixed encoding prefix
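    // Illustrative example (not from the original source): "movb %sil, (%rdi)"
    // uses no extended registers, but SIL is only addressable with a REX
    // prefix, so the bare 0x40 set here yields the encoding 40 88 37.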
1025 switch (TSFlags & X86II::FormMask) {
1026 case X86II::MRMSrcReg:
1027 if (MI.getOperand(0).isReg() &&
1028 X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
1029 REX |= 1 << 2; // set REX.R
1030 i = isTwoAddr ? 2 : 1;
1031 for (; i != NumOps; ++i) {
1032 const MCOperand &MO = MI.getOperand(i);
1033 if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
1034 REX |= 1 << 0; // set REX.B
1037 case X86II::MRMSrcMem: {
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 2; // set REX.R
    unsigned Bit = 0;
    i = isTwoAddr ? 2 : 1;
    for (; i != NumOps; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        if (X86II::isX86_64ExtendedReg(MO.getReg()))
          REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
        Bit++;
      }
    }
    break;
  }
1053 case X86II::MRM0m: case X86II::MRM1m:
1054 case X86II::MRM2m: case X86II::MRM3m:
1055 case X86II::MRM4m: case X86II::MRM5m:
1056 case X86II::MRM6m: case X86II::MRM7m:
1057 case X86II::MRMDestMem: {
1058 unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
1059 i = isTwoAddr ? 1 : 0;
    if (NumOps > e && MI.getOperand(e).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
      REX |= 1 << 2; // set REX.R
    unsigned Bit = 0;
    for (; i != e; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        if (X86II::isX86_64ExtendedReg(MO.getReg()))
          REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
        Bit++;
      }
    }
    break;
  }
  default:
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 0; // set REX.B
    i = isTwoAddr ? 2 : 1;
    for (unsigned e = NumOps; i != e; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
        REX |= 1 << 2; // set REX.R
    }
    break;
  }
  return REX;
}
1089 /// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
1090 void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
1091 unsigned SegOperand,
1093 raw_ostream &OS) const {
1094 // Check for explicit segment override on memory operand.
1095 switch (MI.getOperand(SegOperand).getReg()) {
  default: llvm_unreachable("Unknown segment register!");
  case 0: break;
1098 case X86::CS: EmitByte(0x2E, CurByte, OS); break;
1099 case X86::SS: EmitByte(0x36, CurByte, OS); break;
1100 case X86::DS: EmitByte(0x3E, CurByte, OS); break;
1101 case X86::ES: EmitByte(0x26, CurByte, OS); break;
1102 case X86::FS: EmitByte(0x64, CurByte, OS); break;
1103 case X86::GS: EmitByte(0x65, CurByte, OS); break;
1107 /// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
1109 /// MemOperand is the operand # of the start of a memory operand if present. If
/// not present, it is -1.
1111 void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
1112 int MemOperand, const MCInst &MI,
1113 const MCInstrDesc &Desc,
1114 const MCSubtargetInfo &STI,
1115 raw_ostream &OS) const {
1117 // Emit the operand size opcode prefix as needed.
1118 if (TSFlags & (is16BitMode(STI) ? X86II::OpSize16 : X86II::OpSize))
1119 EmitByte(0x66, CurByte, OS);
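  // Illustrative example (not from the original source): in 32-bit mode a
  // 16-bit-operand instruction such as "addw %ax, %bx" is flagged OpSize and
  // gets the 0x66 prefix, while in 16-bit mode it is the 32-bit-operand forms
  // (OpSize16) that need it instead.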
  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD:   // 66
    EmitByte(0x66, CurByte, OS);
    break;
  case X86II::XS:   // F3
    EmitByte(0xF3, CurByte, OS);
    break;
  case X86II::XD:   // F2
    EmitByte(0xF2, CurByte, OS);
    break;
  }
1133 // Handle REX prefix.
1134 // FIXME: Can this come before F2 etc to simplify emission?
1135 if (is64BitMode(STI)) {
1136 if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
1137 EmitByte(0x40 | REX, CurByte, OS);
1140 // 0x0F escape code must be emitted just before the opcode.
1141 switch (TSFlags & X86II::OpMapMask) {
1142 case X86II::TB: // Two-byte opcode map
1143 case X86II::T8: // 0F 38
1144 case X86II::TA: // 0F 3A
1145 case X86II::A6: // 0F A6
1146 case X86II::A7: // 0F A7
    EmitByte(0x0F, CurByte, OS);
    break;
  case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
  case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
    EmitByte(0xD8+(((TSFlags & X86II::OpMapMask) - X86II::D8) >>
                   X86II::OpMapShift), CurByte, OS);
    break;
  }
  switch (TSFlags & X86II::OpMapMask) {
  case X86II::T8:    // 0F 38
    EmitByte(0x38, CurByte, OS);
    break;
  case X86II::TA:    // 0F 3A
    EmitByte(0x3A, CurByte, OS);
    break;
  case X86II::A6:    // 0F A6
    EmitByte(0xA6, CurByte, OS);
    break;
  case X86II::A7:    // 0F A7
    EmitByte(0xA7, CurByte, OS);
    break;
  }
}
1172 void X86MCCodeEmitter::
1173 EncodeInstruction(const MCInst &MI, raw_ostream &OS,
1174 SmallVectorImpl<MCFixup> &Fixups,
1175 const MCSubtargetInfo &STI) const {
1176 unsigned Opcode = MI.getOpcode();
1177 const MCInstrDesc &Desc = MCII.get(Opcode);
1178 uint64_t TSFlags = Desc.TSFlags;
1180 // Pseudo instructions don't get encoded.
  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
    return;
1184 unsigned NumOps = Desc.getNumOperands();
1185 unsigned CurOp = X86II::getOperandBias(Desc);
1187 // Keep track of the current byte being emitted.
1188 unsigned CurByte = 0;
1190 // Is this instruction encoded using the AVX VEX prefix?
1191 bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX;
  // Does it use the VEX.VVVV field?
1194 bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
1195 bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
1196 bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
1197 const unsigned MemOp4_I8IMMOperand = 2;
  // Does it use the EVEX.aaa field?
1200 bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX;
1201 bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
1202 bool HasEVEX_RC = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_RC);
1204 // Determine where the memory operand starts, if present.
1205 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
1206 if (MemoryOperand != -1) MemoryOperand += CurOp;
1208 // Emit the lock opcode prefix as needed.
1209 if (TSFlags & X86II::LOCK)
1210 EmitByte(0xF0, CurByte, OS);
1212 // Emit segment override opcode prefix as needed.
1213 if (MemoryOperand >= 0)
    EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
                              MI, OS);
1217 // Emit the repeat opcode prefix as needed.
1218 if (TSFlags & X86II::REP)
1219 EmitByte(0xF3, CurByte, OS);
1221 // Emit the address size opcode prefix as needed.
1222 bool need_address_override;
1223 // The AdSize prefix is only for 32-bit and 64-bit modes. Hm, perhaps we
1224 // should introduce an AdSize16 bit instead of having seven special cases?
1225 if ((!is16BitMode(STI) && TSFlags & X86II::AdSize) ||
1226 (is16BitMode(STI) && (MI.getOpcode() == X86::JECXZ_32 ||
1227 MI.getOpcode() == X86::MOV8o8a ||
1228 MI.getOpcode() == X86::MOV16o16a ||
1229 MI.getOpcode() == X86::MOV32o32a ||
1230 MI.getOpcode() == X86::MOV8ao8 ||
1231 MI.getOpcode() == X86::MOV16ao16 ||
1232 MI.getOpcode() == X86::MOV32ao32))) {
1233 need_address_override = true;
1234 } else if (MemoryOperand < 0) {
1235 need_address_override = false;
1236 } else if (is64BitMode(STI)) {
1237 assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
1238 need_address_override = Is32BitMemOperand(MI, MemoryOperand);
1239 } else if (is32BitMode(STI)) {
1240 assert(!Is64BitMemOperand(MI, MemoryOperand));
1241 need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
1243 assert(is16BitMode(STI));
1244 assert(!Is64BitMemOperand(MI, MemoryOperand));
1245 need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
1248 if (need_address_override)
1249 EmitByte(0x67, CurByte, OS);
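  // Illustrative example (not from the original source): in 32-bit mode a
  // 16-bit address such as (%bx,%si) needs the 0x67 prefix, and in 64-bit mode
  // a 32-bit address such as (%ebx) does; the checks above cover exactly those
  // mismatches between the current mode and the memory operand's size.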
  if (!HasVEXPrefix)
    EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
  else
    EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
1256 unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
1258 if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
1259 BaseOpcode = 0x0F; // Weird 3DNow! encoding.
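  // Illustrative example (not from the original source): 3DNow! "pfadd %mm1,
  // %mm0" is encoded as 0F 0F C1 9E -- the escape bytes and ModRM come first,
  // and the real opcode byte (0x9E) is appended after the operands at the end
  // of this function.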
1261 unsigned SrcRegNum = 0;
1262 switch (TSFlags & X86II::FormMask) {
1263 default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
1264 llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
1267 case X86II::RawFrmDstSrc: {
1268 unsigned siReg = MI.getOperand(1).getReg();
1269 assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
1270 (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
1271 (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
1272 "SI and DI register sizes do not match");
1273 // Emit segment override opcode prefix as needed (not for %ds).
1274 if (MI.getOperand(2).getReg() != X86::DS)
1275 EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
    // Emit AdSize (address size, 0x67) prefix as needed.
1277 if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1278 (is32BitMode(STI) && siReg == X86::SI))
1279 EmitByte(0x67, CurByte, OS);
1280 CurOp += 3; // Consume operands.
1281 EmitByte(BaseOpcode, CurByte, OS);
1284 case X86II::RawFrmSrc: {
1285 unsigned siReg = MI.getOperand(0).getReg();
1286 // Emit segment override opcode prefix as needed (not for %ds).
1287 if (MI.getOperand(1).getReg() != X86::DS)
1288 EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    // Emit AdSize (address size, 0x67) prefix as needed.
1290 if ((!is32BitMode(STI) && siReg == X86::ESI) ||
1291 (is32BitMode(STI) && siReg == X86::SI))
1292 EmitByte(0x67, CurByte, OS);
1293 CurOp += 2; // Consume operands.
1294 EmitByte(BaseOpcode, CurByte, OS);
1297 case X86II::RawFrmDst: {
1298 unsigned siReg = MI.getOperand(0).getReg();
    // Emit AdSize (address size, 0x67) prefix as needed.
1300 if ((!is32BitMode(STI) && siReg == X86::EDI) ||
1301 (is32BitMode(STI) && siReg == X86::DI))
1302 EmitByte(0x67, CurByte, OS);
1303 ++CurOp; // Consume operand.
1304 EmitByte(BaseOpcode, CurByte, OS);
  case X86II::RawFrm:
    EmitByte(BaseOpcode, CurByte, OS);
    break;
1310 case X86II::RawFrmMemOffs:
1311 // Emit segment override opcode prefix as needed.
1312 EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
1313 EmitByte(BaseOpcode, CurByte, OS);
1314 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1315 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1316 CurByte, OS, Fixups);
1317 ++CurOp; // skip segment operand
1319 case X86II::RawFrmImm8:
1320 EmitByte(BaseOpcode, CurByte, OS);
1321 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1322 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1323 CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
                  OS, Fixups);
    break;
1327 case X86II::RawFrmImm16:
1328 EmitByte(BaseOpcode, CurByte, OS);
1329 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1330 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1331 CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
                  OS, Fixups);
    break;
1336 case X86II::AddRegFrm:
1337 EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
1340 case X86II::MRMDestReg:
1341 EmitByte(BaseOpcode, CurByte, OS);
1342 SrcRegNum = CurOp + 1;
    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;
    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      SrcRegNum++;
1350 EmitRegModRMByte(MI.getOperand(CurOp),
1351 GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
1352 CurOp = SrcRegNum + 1;
1355 case X86II::MRMDestMem:
1356 EmitByte(BaseOpcode, CurByte, OS);
1357 SrcRegNum = CurOp + X86::AddrNumOperands;
    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;
    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      SrcRegNum++;
1365 EmitMemModRMByte(MI, CurOp,
1366 GetX86RegNum(MI.getOperand(SrcRegNum)),
1367 TSFlags, CurByte, OS, Fixups, STI);
1368 CurOp = SrcRegNum + 1;
1371 case X86II::MRMSrcReg:
1372 EmitByte(BaseOpcode, CurByte, OS);
1373 SrcRegNum = CurOp + 1;
    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;
    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      SrcRegNum++;
    if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
      SrcRegNum++;
1384 EmitRegModRMByte(MI.getOperand(SrcRegNum),
1385 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1387 // 2 operands skipped with HasMemOp4, compensate accordingly
1388 CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
1391 // do not count the rounding control operand
1396 case X86II::MRMSrcMem: {
    int AddrOperands = X86::AddrNumOperands;
    unsigned FirstMemOp = CurOp+1;
    if (HasEVEX_K) { // Skip writemask
      ++AddrOperands;
      ++FirstMemOp;
    }
    if (HasVEX_4V) {
      ++AddrOperands;
      ++FirstMemOp;  // Skip the register source (which is encoded in VEX_VVVV).
    }
    if (HasMemOp4) // Skip second register source (encoded in I8IMM)
      ++FirstMemOp;
1412 EmitByte(BaseOpcode, CurByte, OS);
1414 EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1415 TSFlags, CurByte, OS, Fixups, STI);
1416 CurOp += AddrOperands + 1;
1422 case X86II::MRM0r: case X86II::MRM1r:
1423 case X86II::MRM2r: case X86II::MRM3r:
1424 case X86II::MRM4r: case X86II::MRM5r:
1425 case X86II::MRM6r: case X86II::MRM7r:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
    EmitRegModRMByte(MI.getOperand(CurOp++),
                     (TSFlags & X86II::FormMask)-X86II::MRM0r,
                     CurByte, OS);
    break;
1433 case X86II::MRM0m: case X86II::MRM1m:
1434 case X86II::MRM2m: case X86II::MRM3m:
1435 case X86II::MRM4m: case X86II::MRM5m:
1436 case X86II::MRM6m: case X86II::MRM7m:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
1440 EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
1441 TSFlags, CurByte, OS, Fixups, STI);
1442 CurOp += X86::AddrNumOperands;
1444 case X86II::MRM_C1: case X86II::MRM_C2: case X86II::MRM_C3:
1445 case X86II::MRM_C4: case X86II::MRM_C8: case X86II::MRM_C9:
1446 case X86II::MRM_CA: case X86II::MRM_CB: case X86II::MRM_D0:
1447 case X86II::MRM_D1: case X86II::MRM_D4: case X86II::MRM_D5:
1448 case X86II::MRM_D6: case X86II::MRM_D8: case X86II::MRM_D9:
1449 case X86II::MRM_DA: case X86II::MRM_DB: case X86II::MRM_DC:
1450 case X86II::MRM_DD: case X86II::MRM_DE: case X86II::MRM_DF:
  case X86II::MRM_E8: case X86II::MRM_F0: case X86II::MRM_F8:
  case X86II::MRM_F9:
    EmitByte(BaseOpcode, CurByte, OS);

    unsigned char MRM;
    switch (TSFlags & X86II::FormMask) {
1457 default: llvm_unreachable("Invalid Form");
1458 case X86II::MRM_C1: MRM = 0xC1; break;
1459 case X86II::MRM_C2: MRM = 0xC2; break;
1460 case X86II::MRM_C3: MRM = 0xC3; break;
1461 case X86II::MRM_C4: MRM = 0xC4; break;
1462 case X86II::MRM_C8: MRM = 0xC8; break;
1463 case X86II::MRM_C9: MRM = 0xC9; break;
1464 case X86II::MRM_CA: MRM = 0xCA; break;
1465 case X86II::MRM_CB: MRM = 0xCB; break;
1466 case X86II::MRM_D0: MRM = 0xD0; break;
1467 case X86II::MRM_D1: MRM = 0xD1; break;
1468 case X86II::MRM_D4: MRM = 0xD4; break;
1469 case X86II::MRM_D5: MRM = 0xD5; break;
1470 case X86II::MRM_D6: MRM = 0xD6; break;
1471 case X86II::MRM_D8: MRM = 0xD8; break;
1472 case X86II::MRM_D9: MRM = 0xD9; break;
1473 case X86II::MRM_DA: MRM = 0xDA; break;
1474 case X86II::MRM_DB: MRM = 0xDB; break;
1475 case X86II::MRM_DC: MRM = 0xDC; break;
1476 case X86II::MRM_DD: MRM = 0xDD; break;
1477 case X86II::MRM_DE: MRM = 0xDE; break;
1478 case X86II::MRM_DF: MRM = 0xDF; break;
1479 case X86II::MRM_E8: MRM = 0xE8; break;
1480 case X86II::MRM_F0: MRM = 0xF0; break;
1481 case X86II::MRM_F8: MRM = 0xF8; break;
1482 case X86II::MRM_F9: MRM = 0xF9; break;
1484 EmitByte(MRM, CurByte, OS);
1488 // If there is a remaining operand, it must be a trailing immediate. Emit it
1489 // according to the right size for the instruction. Some instructions
1490 // (SSE4a extrq and insertq) have two trailing immediates.
1491 while (CurOp != NumOps && NumOps - CurOp <= 2) {
1492 // The last source register of a 4 operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte.
1494 if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
      const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
                                                     : CurOp);
      ++CurOp;
      unsigned RegNum = GetX86RegNum(MO) << 4;
      if (X86II::isX86_64ExtendedReg(MO.getReg()))
        RegNum |= 1 << 7;
1501 // If there is an additional 5th operand it must be an immediate, which
1502 // is encoded in bits[3:0]
1503 if (CurOp != NumOps) {
1504 const MCOperand &MIMM = MI.getOperand(CurOp++);
1506 unsigned Val = MIMM.getImm();
          assert(Val < 16 && "Immediate operand value out of range");
          RegNum |= Val;
1511 EmitImmediate(MCOperand::CreateImm(RegNum), MI.getLoc(), 1, FK_Data_1,
1512 CurByte, OS, Fixups);
1514 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1515 X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1516 CurByte, OS, Fixups);
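    // Illustrative example (not from the original source): in the VEX_I8IMM
    // path above, "vblendvps %xmm4, %xmm3, %xmm2, %xmm1" encodes the extra
    // register source XMM4 in bits[7:4] of the trailing immediate, i.e. 0x40.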
1520 if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
1521 EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
1525 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1526 errs() << "Cannot encode all operands of: ";