1 //===-- X86/X86CodeEmitter.cpp - Convert X86 code to machine code ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the pass that transforms the X86 machine instructions into
11 // relocatable machine code.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "x86-emitter"
16 #include "X86InstrInfo.h"
17 #include "X86Subtarget.h"
18 #include "X86TargetMachine.h"
19 #include "X86Relocations.h"
21 #include "llvm/PassManager.h"
22 #include "llvm/CodeGen/MachineCodeEmitter.h"
23 #include "llvm/CodeGen/MachineFunctionPass.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/Passes.h"
26 #include "llvm/Function.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Support/Compiler.h"
29 #include "llvm/Target/TargetOptions.h"
32 STATISTIC(NumEmitted, "Number of machine instructions emitted");
/// Emitter - MachineFunctionPass that encodes X86 MachineInstrs into raw
/// machine-code bytes, pushing the bytes and any unresolved references
/// (globals, constant pools, jump tables, external symbols) into a
/// MachineCodeEmitter as MachineRelocations.
class VISIBILITY_HIDDEN Emitter : public MachineFunctionPass {
  const X86InstrInfo  *II;   // Encoding tables; filled in per-function when null.
  MachineCodeEmitter  &MCE;  // Destination for emitted bytes and relocations.
  // Deferred-init form used by createX86CodeEmitterPass: II/TD are resolved
  // later in runOnMachineFunction.
  explicit Emitter(TargetMachine &tm, MachineCodeEmitter &mce)
    : MachineFunctionPass((intptr_t)&ID), II(0), TD(0), TM(tm),
    MCE(mce), PICBase(0), Is64BitMode(false),
    IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
  // Fully-specified form for callers that already know the instruction info,
  // target data, and pointer width.
  Emitter(TargetMachine &tm, MachineCodeEmitter &mce,
          const X86InstrInfo &ii, const TargetData &td, bool is64)
    : MachineFunctionPass((intptr_t)&ID), II(&ii), TD(&td), TM(tm),
    MCE(mce), PICBase(0), Is64BitMode(is64),
    IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}

  bool runOnMachineFunction(MachineFunction &MF);

  virtual const char *getPassName() const {
    return "X86 Machine Code Emitter";

  // Emit one complete instruction (prefixes, opcode, operands).
  void emitInstruction(const MachineInstr &MI);

  // Helpers: each emits one address/operand component, recording a
  // MachineRelocation when the final value is not known until JIT/link time.
  void emitPCRelativeBlockAddress(MachineBasicBlock *MBB);
  void emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
                         int Disp = 0, intptr_t PCAdj = 0,
                         bool NeedStub = false, bool IsLazy = false);
  void emitExternalSymbolAddress(const char *ES, unsigned Reloc);
  void emitConstPoolAddress(unsigned CPI, unsigned Reloc, int Disp = 0,
  void emitJumpTableAddress(unsigned JTI, unsigned Reloc,
  void emitDisplacementField(const MachineOperand *RelocOp, int DispVal,
  // Low-level byte emitters for the ModR/M / SIB / immediate fields.
  void emitRegModRMByte(unsigned ModRMReg, unsigned RegOpcodeField);
  void emitSIBByte(unsigned SS, unsigned Index, unsigned Base);
  void emitConstant(uint64_t Val, unsigned Size);
  void emitMemModRMByte(const MachineInstr &MI,
                        unsigned Op, unsigned RegOpcodeField,
  unsigned getX86RegNum(unsigned RegNo);
  bool isX86_64ExtendedReg(const MachineOperand &MO);
  unsigned determineREX(const MachineInstr &MI);
  bool gvNeedsLazyPtr(const GlobalValue *GV);
94 /// createX86CodeEmitterPass - Return a pass that emits the collected X86 code
95 /// to the specified MCE object.
FunctionPass *llvm::createX86CodeEmitterPass(X86TargetMachine &TM,
                                             MachineCodeEmitter &MCE) {
  // Uses the deferred-init constructor; II/TD are set in runOnMachineFunction.
  return new Emitter(TM, MCE);
101 bool Emitter::runOnMachineFunction(MachineFunction &MF) {
102 assert((MF.getTarget().getRelocationModel() != Reloc::Default ||
103 MF.getTarget().getRelocationModel() != Reloc::Static) &&
104 "JIT relocation model must be set to static or default!");
105 II = ((X86TargetMachine&)TM).getInstrInfo();
106 TD = ((X86TargetMachine&)TM).getTargetData();
107 Is64BitMode = TM.getSubtarget<X86Subtarget>().is64Bit();
110 MCE.startFunction(MF);
111 for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
113 MCE.StartMachineBasicBlock(MBB);
114 for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
118 } while (MCE.finishFunction(MF));
/// emitPCRelativeBlockAddress - This method keeps track of the information
/// necessary to resolve the address of this block later and emits a dummy
/// value in its place (patched when the block's address is known).
void Emitter::emitPCRelativeBlockAddress(MachineBasicBlock *MBB) {
  // Remember where this reference was and where it is to so we can
  // deal with it later.
  MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
                                             X86::reloc_pcrel_word, MBB));
/// emitGlobalAddress - Emit the specified address to the code stream assuming
/// this is part of a "take the address of a global" instruction.  Reloc
/// selects the relocation kind; Disp is folded into the emitted word; NeedStub
/// requests a call stub and isLazy routes the access through a lazy pointer.
void Emitter::emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
                                int Disp /* = 0 */, intptr_t PCAdj /* = 0 */,
                                bool NeedStub /* = false */,
                                bool isLazy /* = false */) {
  // The constant attached to the relocation depends on its kind: picrel
  // references are taken relative to the PIC base, pcrel references fold in
  // the PC adjustment.
  intptr_t RelocCST = 0;
  if (Reloc == X86::reloc_picrel_word)
  else if (Reloc == X86::reloc_pcrel_word)
  // A lazy reference resolves through a lazy-pointer stub rather than
  // directly to the global's address.
  MachineRelocation MR = isLazy
    ? MachineRelocation::getGVLazyPtr(MCE.getCurrentPCOffset(), Reloc,
                                      GV, RelocCST, NeedStub)
    : MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
                               GV, RelocCST, NeedStub);
  MCE.addRelocation(MR);
  if (Reloc == X86::reloc_absolute_dword)
  MCE.emitWordLE(Disp); // The relocated value will be added to the displacement
/// emitExternalSymbolAddress - Arrange for the address of an external symbol to
/// be emitted to the current location in the function, and allow it to be PC
/// relative.
void Emitter::emitExternalSymbolAddress(const char *ES, unsigned Reloc) {
  // PIC references are emitted relative to this function's PIC base.
  intptr_t RelocCST = (Reloc == X86::reloc_picrel_word) ? PICBase : 0;
  MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
                                                 Reloc, ES, RelocCST));
  if (Reloc == X86::reloc_absolute_dword)
/// emitConstPoolAddress - Arrange for the address of an constant pool
/// to be emitted to the current location in the function, and allow it to be PC
/// relative.
void Emitter::emitConstPoolAddress(unsigned CPI, unsigned Reloc,
                                   intptr_t PCAdj /* = 0 */) {
  // Relocation constant: PIC base for picrel references, PC adjustment for
  // pcrel references.
  intptr_t RelocCST = 0;
  if (Reloc == X86::reloc_picrel_word)
  else if (Reloc == X86::reloc_pcrel_word)
  MCE.addRelocation(MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
                                                    Reloc, CPI, RelocCST));
  if (Reloc == X86::reloc_absolute_dword)
  MCE.emitWordLE(Disp); // The relocated value will be added to the displacement
/// emitJumpTableAddress - Arrange for the address of a jump table to
/// be emitted to the current location in the function, and allow it to be PC
/// relative.
void Emitter::emitJumpTableAddress(unsigned JTI, unsigned Reloc,
                                   intptr_t PCAdj /* = 0 */) {
  // Relocation constant: PIC base for picrel references, PC adjustment for
  // pcrel references.
  intptr_t RelocCST = 0;
  if (Reloc == X86::reloc_picrel_word)
  else if (Reloc == X86::reloc_pcrel_word)
  MCE.addRelocation(MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
                                                    Reloc, JTI, RelocCST));
  if (Reloc == X86::reloc_absolute_dword)
  MCE.emitWordLE(0); // The relocated value will be added to the displacement
/// getX86RegNum - Map an LLVM register number onto the 3-bit hardware
/// encoding used in ModR/M and SIB fields, delegating to X86RegisterInfo.
unsigned Emitter::getX86RegNum(unsigned RegNo) {
  return ((X86RegisterInfo&)II->getRegisterInfo()).getX86RegNum(RegNo);
209 inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
211 assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
212 return RM | (RegOpcode << 3) | (Mod << 6);
/// emitRegModRMByte - Emit a register-direct ModR/M byte (mod = 3), with
/// ModRMReg encoded in the r/m field and RegOpcodeFld in the reg field.
void Emitter::emitRegModRMByte(unsigned ModRMReg, unsigned RegOpcodeFld){
  MCE.emitByte(ModRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)));
/// emitSIBByte - Emit a scale/index/base byte; its bit layout is identical
/// to ModR/M (scale in bits 7-6, index in 5-3, base in 2-0).
void Emitter::emitSIBByte(unsigned SS, unsigned Index, unsigned Base) {
  // SIB byte is in the same format as the ModRMByte...
  MCE.emitByte(ModRMByte(SS, Index, Base));
/// emitConstant - Emit the low Size bytes of Val, least-significant first.
void Emitter::emitConstant(uint64_t Val, unsigned Size) {
  // Output the constant in little endian byte order...
  for (unsigned i = 0; i != Size; ++i) {
    // NOTE(review): Val is presumably shifted right 8 bits after each byte;
    // the shift statement is not visible in this listing -- confirm.
    MCE.emitByte(Val & 255);
/// isDisp8 - Return true if this signed displacement fits in a 8-bit
/// sign-extended field, i.e. it survives a round-trip through signed char.
static bool isDisp8(int Value) {
  return Value >= -128 && Value <= 127;
}
/// gvNeedsLazyPtr - Return true if references to GV must go through a lazy
/// pointer: only possible in 32-bit mode, and only when the subtarget says
/// the global requires an extra load.
bool Emitter::gvNeedsLazyPtr(const GlobalValue *GV) {
  return !Is64BitMode &&
    TM.getSubtarget<X86Subtarget>().GVRequiresExtraLoad(GV, TM, false);
/// emitDisplacementField - Emit a 32-bit displacement, either as the plain
/// constant DispVal or, when RelocOp names a global / constant-pool entry /
/// jump table, as a relocation chosen to match the current mode
/// (64-bit -> pcrel, PIC -> picrel, otherwise absolute).
void Emitter::emitDisplacementField(const MachineOperand *RelocOp,
                                    int DispVal, intptr_t PCAdj) {
  // If this is a simple integer displacement that doesn't require a relocation,
  // emit it now.
    emitConstant(DispVal, 4);

  // Otherwise, this is something that requires a relocation.  Emit it as such
  // now.
  if (RelocOp->isGlobalAddress()) {
    // In 64-bit static small code model, we could potentially emit absolute.
    // But it's probably not beneficial.
    //  89 05 00 00 00 00     mov    %eax,0(%rip)  # PC-relative
    //  89 04 25 00 00 00 00  mov    %eax,0x0      # Absolute
    unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
      : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
    // Function addresses may need a stub; data may need a lazy pointer.
    bool NeedStub = isa<Function>(RelocOp->getGlobal());
    bool isLazy = gvNeedsLazyPtr(RelocOp->getGlobal());
    emitGlobalAddress(RelocOp->getGlobal(), rt, RelocOp->getOffset(),
                      PCAdj, NeedStub, isLazy);
  } else if (RelocOp->isConstantPoolIndex()) {
    unsigned rt = Is64BitMode ? X86::reloc_pcrel_word : X86::reloc_picrel_word;
    emitConstPoolAddress(RelocOp->getIndex(), rt,
                         RelocOp->getOffset(), PCAdj);
  } else if (RelocOp->isJumpTableIndex()) {
    unsigned rt = Is64BitMode ? X86::reloc_pcrel_word : X86::reloc_picrel_word;
    emitJumpTableAddress(RelocOp->getIndex(), rt, PCAdj);
    assert(0 && "Unknown value to relocate!");
/// emitMemModRMByte - Emit the ModR/M byte -- plus, when needed, the SIB byte
/// and displacement -- for the memory operand starting at operand index Op.
/// The operand layout is: Op = base reg, Op+1 = scale imm, Op+2 = index reg,
/// Op+3 = displacement (imm, global, constant pool, or jump table).
void Emitter::emitMemModRMByte(const MachineInstr &MI,
                               unsigned Op, unsigned RegOpcodeField,
  const MachineOperand &Op3 = MI.getOperand(Op+3);
  const MachineOperand *DispForReloc = 0;

  // Figure out what sort of displacement we have to handle here.
  if (Op3.isGlobalAddress()) {
  } else if (Op3.isConstantPoolIndex()) {
    // In 64-bit or PIC mode the pool entry's address isn't known yet;
    // otherwise fold the entry's address into the displacement directly.
    if (Is64BitMode || IsPIC) {
      DispVal += MCE.getConstantPoolEntryAddress(Op3.getIndex());
      DispVal += Op3.getOffset();
  } else if (Op3.isJumpTableIndex()) {
    if (Is64BitMode || IsPIC) {
      DispVal += MCE.getJumpTableEntryAddress(Op3.getIndex());
    DispVal = Op3.getImm();

  const MachineOperand &Base     = MI.getOperand(Op);
  const MachineOperand &Scale    = MI.getOperand(Op+1);
  const MachineOperand &IndexReg = MI.getOperand(Op+2);

  unsigned BaseReg = Base.getReg();

  // Is a SIB byte needed?  (No index register, and the base is not ESP,
  // whose r/m encoding is reserved as the SIB escape.)
  if (IndexReg.getReg() == 0 &&
      (BaseReg == 0 || getX86RegNum(BaseReg) != N86::ESP)) {
    if (BaseReg == 0) {  // Just a displacement?
      // Emit special case [disp32] encoding
      MCE.emitByte(ModRMByte(0, RegOpcodeField, 5));
      emitDisplacementField(DispForReloc, DispVal, PCAdj);
      unsigned BaseRegNo = getX86RegNum(BaseReg);
      // Note: mod=0 with base EBP means [disp32], so [EBP] must use disp8.
      if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) {
        // Emit simple indirect register encoding... [EAX] f.e.
        MCE.emitByte(ModRMByte(0, RegOpcodeField, BaseRegNo));
      } else if (!DispForReloc && isDisp8(DispVal)) {
        // Emit the disp8 encoding... [REG+disp8]
        MCE.emitByte(ModRMByte(1, RegOpcodeField, BaseRegNo));
        emitConstant(DispVal, 1);
        // Emit the most general non-SIB encoding: [REG+disp32]
        MCE.emitByte(ModRMByte(2, RegOpcodeField, BaseRegNo));
        emitDisplacementField(DispForReloc, DispVal, PCAdj);
  } else {  // We need a SIB byte, so start by outputting the ModR/M byte first
    assert(IndexReg.getReg() != X86::ESP &&
           IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

    bool ForceDisp32 = false;
    bool ForceDisp8  = false;
      // If there is no base register, we emit the special case SIB byte with
      // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
      MCE.emitByte(ModRMByte(0, RegOpcodeField, 4));
    } else if (DispForReloc) {
      // Emit the normal disp32 encoding.
      MCE.emitByte(ModRMByte(2, RegOpcodeField, 4));
    } else if (DispVal == 0 && getX86RegNum(BaseReg) != N86::EBP) {
      // Emit no displacement ModR/M byte
      MCE.emitByte(ModRMByte(0, RegOpcodeField, 4));
    } else if (isDisp8(DispVal)) {
      // Emit the disp8 encoding...
      MCE.emitByte(ModRMByte(1, RegOpcodeField, 4));
      ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
      // Emit the normal disp32 encoding...
      MCE.emitByte(ModRMByte(2, RegOpcodeField, 4));

    // Calculate what the SS field value should be...
    // (~0 marks scales that have no SIB encoding: only 1, 2, 4, 8 are legal.)
    static const unsigned SSTable[] = { ~0, 0, 1, ~0, 2, ~0, ~0, ~0, 3 };
    unsigned SS = SSTable[Scale.getImm()];
      // Handle the SIB byte for the case where there is no base.  The
      // displacement has already been output.
      assert(IndexReg.getReg() && "Index register must be specified!");
      emitSIBByte(SS, getX86RegNum(IndexReg.getReg()), 5);
      unsigned BaseRegNo = getX86RegNum(BaseReg);
      if (IndexReg.getReg())
        IndexRegNo = getX86RegNum(IndexReg.getReg());
        IndexRegNo = 4;   // For example [ESP+1*<noreg>+4]
      emitSIBByte(SS, IndexRegNo, BaseRegNo);

    // Do we need to output a displacement?
      emitConstant(DispVal, 1);
    } else if (DispVal != 0 || ForceDisp32) {
      emitDisplacementField(DispForReloc, DispVal, PCAdj);
/// sizeOfImm - Decode the immediate-size field of the descriptor's TSFlags
/// into a byte count (1/2/4/8).  Asserts if the instruction has no
/// immediate-size annotation.
static unsigned sizeOfImm(const TargetInstrDescriptor *Desc) {
  switch (Desc->TSFlags & X86II::ImmMask) {
  case X86II::Imm8:   return 1;
  case X86II::Imm16:  return 2;
  case X86II::Imm32:  return 4;
  case X86II::Imm64:  return 8;
  default: assert(0 && "Immediate size not set!");
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended register?
/// e.g. r8, xmm8, etc.  These are the registers (and their sub-register
/// forms) that are only reachable with a REX prefix.
bool Emitter::isX86_64ExtendedReg(const MachineOperand &MO) {
  if (!MO.isRegister()) return false;
  switch (MO.getReg()) {
  case X86::R8:    case X86::R9:    case X86::R10:   case X86::R11:
  case X86::R12:   case X86::R13:   case X86::R14:   case X86::R15:
  case X86::R8D:   case X86::R9D:   case X86::R10D:  case X86::R11D:
  case X86::R12D:  case X86::R13D:  case X86::R14D:  case X86::R15D:
  case X86::R8W:   case X86::R9W:   case X86::R10W:  case X86::R11W:
  case X86::R12W:  case X86::R13W:  case X86::R14W:  case X86::R15W:
  case X86::R8B:   case X86::R9B:   case X86::R10B:  case X86::R11B:
  case X86::R12B:  case X86::R13B:  case X86::R14B:  case X86::R15B:
  case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
421 inline static bool isX86_64NonExtLowByteReg(unsigned reg) {
422 return (reg == X86::SPL || reg == X86::BPL ||
423 reg == X86::SIL || reg == X86::DIL);
/// determineREX - Determine if the MachineInstr has to be encoded with a X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.  Returns the low nibble of
/// the prefix (the bits OR'd under 0x40 by the caller).
unsigned Emitter::determineREX(const MachineInstr &MI) {
  const TargetInstrDescriptor *Desc = MI.getInstrDescriptor();

  // Pseudo instructions do not need REX prefix byte.
  if ((Desc->TSFlags & X86II::FormMask) == X86II::Pseudo)
  if (Desc->TSFlags & X86II::REX_W)

  unsigned NumOps = Desc->numOperands;
  // A tied first source means operand 0 is both def and use; skip it below.
  bool isTwoAddr = NumOps > 1 &&
    Desc->getOperandConstraint(1, TOI::TIED_TO) != -1;

  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
  unsigned i = isTwoAddr ? 1 : 0;
  for (unsigned e = NumOps; i != e; ++i) {
    const MachineOperand& MO = MI.getOperand(i);
    if (MO.isRegister()) {
      unsigned Reg = MO.getReg();
      if (isX86_64NonExtLowByteReg(Reg))

  // Which REX bits (B/X/R) are needed depends on where the registers sit in
  // the encoding, which is determined by the instruction form.
  switch (Desc->TSFlags & X86II::FormMask) {
  case X86II::MRMInitReg:
    // Same register appears in both reg and r/m fields.
    if (isX86_64ExtendedReg(MI.getOperand(0)))
      REX |= (1 << 0) | (1 << 2);
  case X86II::MRMSrcReg: {
    if (isX86_64ExtendedReg(MI.getOperand(0)))
    i = isTwoAddr ? 2 : 1;
    for (unsigned e = NumOps; i != e; ++i) {
      const MachineOperand& MO = MI.getOperand(i);
      if (isX86_64ExtendedReg(MO))
  case X86II::MRMSrcMem: {
    if (isX86_64ExtendedReg(MI.getOperand(0)))
    i = isTwoAddr ? 2 : 1;
    for (; i != NumOps; ++i) {
      const MachineOperand& MO = MI.getOperand(i);
      if (MO.isRegister()) {
        if (isX86_64ExtendedReg(MO))
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
  case X86II::MRMDestMem: {
    // e is the operand index just past the 4-operand memory reference.
    unsigned e = isTwoAddr ? 5 : 4;
    i = isTwoAddr ? 1 : 0;
    if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
    for (; i != e; ++i) {
      const MachineOperand& MO = MI.getOperand(i);
      if (MO.isRegister()) {
        if (isX86_64ExtendedReg(MO))
    if (isX86_64ExtendedReg(MI.getOperand(0)))
    i = isTwoAddr ? 2 : 1;
    for (unsigned e = NumOps; i != e; ++i) {
      const MachineOperand& MO = MI.getOperand(i);
      if (isX86_64ExtendedReg(MO))
/// emitInstruction - Encode one MachineInstr: legacy prefixes, optional REX,
/// opcode escape bytes, the opcode itself, then the form-specific operand
/// bytes (ModR/M, SIB, displacement, immediate).
void Emitter::emitInstruction(const MachineInstr &MI) {
  NumEmitted++;  // Keep track of the # of mi's emitted

  const TargetInstrDescriptor *Desc = MI.getInstrDescriptor();
  unsigned Opcode = Desc->Opcode;

  // Emit the repeat opcode prefix as needed.
  if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) MCE.emitByte(0xF3);

  // Emit the operand size opcode prefix as needed.
  if (Desc->TSFlags & X86II::OpSize) MCE.emitByte(0x66);

  // Emit the address size opcode prefix as needed.
  if (Desc->TSFlags & X86II::AdSize) MCE.emitByte(0x67);

  // Decide which opcode-escape/mandatory prefix bytes are required.
  bool Need0FPrefix = false;
  switch (Desc->TSFlags & X86II::Op0Mask) {
    Need0FPrefix = true;  // Two-byte opcode prefix
  case X86II::REP: break; // already handled.
  case X86II::XS:   // F3 0F
  case X86II::XD:   // F2 0F
  case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
  case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
    // X87 escape opcodes: the opcode byte is derived from the form field.
    (((Desc->TSFlags & X86II::Op0Mask)-X86II::D8)
                  >> X86II::Op0Shift));
    break; // Two-byte opcode prefix
  default: assert(0 && "Invalid prefix!");
  case 0: break;  // No prefix!

  // In 64-bit mode, emit the REX prefix when any of its bits are needed.
  unsigned REX = determineREX(MI);
    MCE.emitByte(0x40 | REX);

  // 0x0F escape code must be emitted just before the opcode.

  // If this is a two-address instruction, skip one of the register operands.
  unsigned NumOps = Desc->numOperands;
  if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)

  unsigned char BaseOpcode = II->getBaseOpcodeFor(Desc);
  switch (Desc->TSFlags & X86II::FormMask) {
  default: assert(0 && "Unknown FormMask value in X86 MachineCodeEmitter!");
    assert(0 && "psuedo instructions should be removed before code emission");
  case TargetInstrInfo::INLINEASM:
    assert(0 && "JIT does not support inline asm!\n");
  case TargetInstrInfo::LABEL:
    assert(0 && "JIT does not support meta labels!\n");
  // The following pseudo-ops expand to no bytes at all.
  case X86::IMPLICIT_USE:
  case X86::IMPLICIT_DEF:
  case X86::IMPLICIT_DEF_GR8:
  case X86::IMPLICIT_DEF_GR16:
  case X86::IMPLICIT_DEF_GR32:
  case X86::IMPLICIT_DEF_GR64:
  case X86::IMPLICIT_DEF_FR32:
  case X86::IMPLICIT_DEF_FR64:
  case X86::IMPLICIT_DEF_VR64:
  case X86::IMPLICIT_DEF_VR128:
  case X86::FP_REG_KILL:

    // RawFrm: opcode byte followed by at most one raw operand.
    MCE.emitByte(BaseOpcode);
    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      if (MO.isMachineBasicBlock()) {
        emitPCRelativeBlockAddress(MO.getMBB());
      } else if (MO.isGlobalAddress()) {
        // Direct calls to far-away code need a stub under the large model.
        bool NeedStub = Is64BitMode && TM.getCodeModel() == CodeModel::Large;
        emitGlobalAddress(MO.getGlobal(), X86::reloc_pcrel_word,
      } else if (MO.isExternalSymbol()) {
        emitExternalSymbolAddress(MO.getSymbolName(), X86::reloc_pcrel_word);
      } else if (MO.isImmediate()) {
        emitConstant(MO.getImm(), sizeOfImm(Desc));
        assert(0 && "Unknown RawFrm operand!");

    // Remember the current PC offset, this is the PIC relocation
    // base address.
    if (Opcode == X86::MovePCtoStack)
      PICBase = MCE.getCurrentPCOffset();

  case X86II::AddRegFrm:
    // Register is folded into the low 3 bits of the opcode byte.
    MCE.emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++).getReg()));
    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = sizeOfImm(Desc);
      if (MO1.isImmediate())
        emitConstant(MO1.getImm(), Size);
        unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
          : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
        if (Opcode == X86::MOV64ri)
          rt = X86::reloc_absolute_dword;  // FIXME: add X86II flag?
        if (MO1.isGlobalAddress()) {
          bool NeedStub = isa<Function>(MO1.getGlobal());
          bool isLazy = gvNeedsLazyPtr(MO1.getGlobal());
          emitGlobalAddress(MO1.getGlobal(), rt, MO1.getOffset(), 0,
        } else if (MO1.isExternalSymbol())
          emitExternalSymbolAddress(MO1.getSymbolName(), rt);
        else if (MO1.isConstantPoolIndex())
          emitConstPoolAddress(MO1.getIndex(), rt);
        else if (MO1.isJumpTableIndex())
          emitJumpTableAddress(MO1.getIndex(), rt);

  case X86II::MRMDestReg: {
    MCE.emitByte(BaseOpcode);
    emitRegModRMByte(MI.getOperand(CurOp).getReg(),
                     getX86RegNum(MI.getOperand(CurOp+1).getReg()));
      emitConstant(MI.getOperand(CurOp++).getImm(), sizeOfImm(Desc));

  case X86II::MRMDestMem: {
    MCE.emitByte(BaseOpcode);
    // Memory operand occupies 4 slots; the source register follows at +4.
    emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(CurOp+4).getReg()));
      emitConstant(MI.getOperand(CurOp++).getImm(), sizeOfImm(Desc));

  case X86II::MRMSrcReg:
    MCE.emitByte(BaseOpcode);
    emitRegModRMByte(MI.getOperand(CurOp+1).getReg(),
                     getX86RegNum(MI.getOperand(CurOp).getReg()));
      emitConstant(MI.getOperand(CurOp++).getImm(), sizeOfImm(Desc));

  case X86II::MRMSrcMem: {
    // A trailing immediate shifts any PC-relative displacement by its size.
    intptr_t PCAdj = (CurOp+5 != NumOps) ? sizeOfImm(Desc) : 0;
    MCE.emitByte(BaseOpcode);
    emitMemModRMByte(MI, CurOp+1, getX86RegNum(MI.getOperand(CurOp).getReg()),
      emitConstant(MI.getOperand(CurOp++).getImm(), sizeOfImm(Desc));

  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    MCE.emitByte(BaseOpcode);
    // The reg field of ModR/M carries an opcode extension, not a register.
    emitRegModRMByte(MI.getOperand(CurOp++).getReg(),
                     (Desc->TSFlags & X86II::FormMask)-X86II::MRM0r);
    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = sizeOfImm(Desc);
      if (MO1.isImmediate())
        emitConstant(MO1.getImm(), Size);
        unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
          : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
        if (Opcode == X86::MOV64ri32)
          rt = X86::reloc_absolute_word;  // FIXME: add X86II flag?
        if (MO1.isGlobalAddress()) {
          bool NeedStub = isa<Function>(MO1.getGlobal());
          bool isLazy = gvNeedsLazyPtr(MO1.getGlobal());
          emitGlobalAddress(MO1.getGlobal(), rt, MO1.getOffset(), 0,
        } else if (MO1.isExternalSymbol())
          emitExternalSymbolAddress(MO1.getSymbolName(), rt);
        else if (MO1.isConstantPoolIndex())
          emitConstPoolAddress(MO1.getIndex(), rt);
        else if (MO1.isJumpTableIndex())
          emitJumpTableAddress(MO1.getIndex(), rt);

  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {
    // PC adjustment: trailing immediate shifts a PC-relative displacement;
    // non-immediate trailing operands count as a 4-byte field.
    intptr_t PCAdj = (CurOp+4 != NumOps) ?
      (MI.getOperand(CurOp+4).isImmediate() ? sizeOfImm(Desc) : 4) : 0;
    MCE.emitByte(BaseOpcode);
    emitMemModRMByte(MI, CurOp, (Desc->TSFlags & X86II::FormMask)-X86II::MRM0m,
    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      unsigned Size = sizeOfImm(Desc);
      if (MO.isImmediate())
        emitConstant(MO.getImm(), Size);
        unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
          : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
        if (Opcode == X86::MOV64mi32)
          rt = X86::reloc_absolute_word;  // FIXME: add X86II flag?
        if (MO.isGlobalAddress()) {
          bool NeedStub = isa<Function>(MO.getGlobal());
          bool isLazy = gvNeedsLazyPtr(MO.getGlobal());
          emitGlobalAddress(MO.getGlobal(), rt, MO.getOffset(), 0,
        } else if (MO.isExternalSymbol())
          emitExternalSymbolAddress(MO.getSymbolName(), rt);
        else if (MO.isConstantPoolIndex())
          emitConstPoolAddress(MO.getIndex(), rt);
        else if (MO.isJumpTableIndex())
          emitJumpTableAddress(MO.getIndex(), rt);

  case X86II::MRMInitReg:
    MCE.emitByte(BaseOpcode);
    // Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
    emitRegModRMByte(MI.getOperand(CurOp).getReg(),
                     getX86RegNum(MI.getOperand(CurOp).getReg()));

  // NOTE(review): '&&' binds tighter than '||' here, so this parses as
  // (Flags & M_VARIABLE_OPS) != 0 || (CurOp == NumOps && "...").  Since the
  // string literal is always truthy the condition still means
  // "variable-ops or all operands consumed", but parenthesizing
  // (CurOp == NumOps) && "..." as intended would silence compiler warnings.
  assert((Desc->Flags & M_VARIABLE_OPS) != 0 ||
         CurOp == NumOps && "Unknown encoding!");