//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
// Option to allow disabling arithmetic relaxation to work around PR9807, which
// is useful when running bitwise comparison experiments on Darwin. We should be
// able to remove this once PR9807 is resolved.
static cl::opt<bool>
MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
         cl::desc("Disable relaxation of arithmetic instructions for X86"));
static unsigned getFixupKindLog2Size(unsigned Kind) {
  switch (Kind) {
  default: llvm_unreachable("invalid fixup kind!");
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1: return 0;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2: return 1;
  case FK_PCRel_4:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_global_offset_table:
  case FK_SecRel_4:
  case FK_Data_4: return 2;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8: return 3;
  }
}
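// The return value is the log2 of the fixup width in bytes; callers recover
// the byte count as 1 << getFixupKindLog2Size(Kind), e.g. FK_Data_4 maps to
// 2, giving a 4-byte field.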
namespace {

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};
class X86AsmBackend : public MCAsmBackend {
  StringRef CPU;
  bool HasNopl;
public:
  X86AsmBackend(const Target &T, StringRef _CPU)
    : MCAsmBackend(), CPU(_CPU) {
    HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
              CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
              CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
              CPU != "geode" && CPU != "winchip-c6" && CPU != "winchip2" &&
              CPU != "c3" && CPU != "c3-2";
  }

  unsigned getNumFixupKinds() const {
    return X86::NumTargetFixupKinds;
  }
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
      { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
      { "reloc_signed_4byte", 0, 4 * 8, 0},
      { "reloc_global_offset_table", 0, 4 * 8, 0}
    };

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }
  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value) const {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DataSize &&
           "Invalid fixup offset!");

    // Check that upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert(isIntN(Size * 8 + 1, Value) &&
           "Value does not fit in the Fixup field");
    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }

  bool mayNeedRelaxation(const MCInst &Inst) const;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const;

  void relaxInstruction(const MCInst &Inst, MCInst &Res) const;

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
};
} // end anonymous namespace
static unsigned getRelaxedOpcodeBranch(unsigned Op) {
  switch (Op) {
  default:
    return Op;

  case X86::JAE_1: return X86::JAE_4;
  case X86::JA_1: return X86::JA_4;
  case X86::JBE_1: return X86::JBE_4;
  case X86::JB_1: return X86::JB_4;
  case X86::JE_1: return X86::JE_4;
  case X86::JGE_1: return X86::JGE_4;
  case X86::JG_1: return X86::JG_4;
  case X86::JLE_1: return X86::JLE_4;
  case X86::JL_1: return X86::JL_4;
  case X86::JMP_1: return X86::JMP_4;
  case X86::JNE_1: return X86::JNE_4;
  case X86::JNO_1: return X86::JNO_4;
  case X86::JNP_1: return X86::JNP_4;
  case X86::JNS_1: return X86::JNS_4;
  case X86::JO_1: return X86::JO_4;
  case X86::JP_1: return X86::JP_4;
  case X86::JS_1: return X86::JS_4;
  }
}
static unsigned getRelaxedOpcodeArith(unsigned Op) {
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

    // PUSH
  case X86::PUSH32i8:  return X86::PUSHi32;
  case X86::PUSH16i8:  return X86::PUSHi16;
  case X86::PUSH64i8:  return X86::PUSH64i32;
  case X86::PUSH64i16: return X86::PUSH64i32;
  }
}
static unsigned getRelaxedOpcode(unsigned Op) {
  unsigned R = getRelaxedOpcodeArith(Op);
  if (R != Op)
    return R;
  return getRelaxedOpcodeBranch(Op);
}
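// Relaxation rewrites the short forms above into their 4-byte
// immediate/displacement equivalents (e.g. JE_1 rel8 -> JE_4 rel32) once the
// assembler discovers the value cannot fit in a signed 8-bit field.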
bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  // Branches can always be relaxed.
  if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
    return true;

  if (MCDisableArithRelaxation)
    return false;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
    return false;

  // Check if it has an expression and is not RIP relative.
  bool hasExp = false;
  bool hasRIP = false;
  for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
    const MCOperand &Op = Inst.getOperand(i);
    if (Op.isExpr())
      hasExp = true;

    if (Op.isReg() && Op.getReg() == X86::RIP)
      hasRIP = true;
  }

  // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
  // how we do relaxations?
  return hasExp && !hasRIP;
}
bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}
// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}
/// \brief Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  static const uint8_t Nops[10][10] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };
  // This CPU doesn't support long nops. If needed add more.
  // FIXME: Can we get this from the subtarget somehow?
  // FIXME: We could generate something better than plain 0x90.
  if (!HasNopl) {
    for (uint64_t i = 0; i < Count; ++i)
      OW->Write8(0x90);
    return true;
  }
  // 15 is the longest single nop instruction. Emit as many 15-byte nops as
  // needed, then emit a nop of the remaining length.
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t) 15);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
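    // A 0x66 operand-size prefix is harmless on a nop, so lengths 11-15 are
    // built by prepending up to five 0x66 prefixes to the 10-byte form.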
    for (uint8_t i = 0; i < Prefixes; i++)
      OW->Write8(0x66);
    const uint8_t Rest = ThisNopLength - Prefixes;
    for (uint8_t i = 0; i < Rest; i++)
      OW->Write8(Nops[Rest - 1][i]);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}
/* *** */

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU)
    : X86AsmBackend(T, CPU), OSABI(_OSABI) {
    HasReliableSymbolDifference = true;
  }
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU)
    : X86AsmBackend(T, CPU)
    , Is64Bit(is64Bit) {
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86WinCOFFObjectWriter(OS, Is64Bit);
  }
};
namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately
    /// after the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };
} // end CU namespace
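// How the pieces fit together (a summary inferred from the shifts and masks
// used below): the mode occupies bits 24-27 of the 32-bit encoding. In
// BP-frame mode, bits 16-23 hold the stack adjustment (in register-size
// units) and bits 0-14 hold five 3-bit saved-register slots. In the frameless
// modes, bits 16-23 hold the stack size (or the offset of the SUB immediate),
// bits 13-15 any extra adjustment, bits 10-12 the saved-register count, and
// bits 0-9 the register permutation.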
class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit;

  unsigned OffsetSize;                   ///< Stack offset added by a "push".
  unsigned PushInstrSize;                ///< Size of a "push" instruction.
  unsigned MoveInstrSize;                ///< Size of a "move" instruction.
  unsigned StackDivide;                  ///< Amount to adjust stack size by.
protected:
  /// \brief Implementation of algorithm to generate the compact unwind
  /// encoding for the CFI instructions.
  uint32_t
  generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));
    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
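    // (3 = REX.W prefix + opcode + ModRM, 2 = opcode + ModRM: the byte offset
    // of the immediate within a "sub $nnn, %[re]sp" instruction.)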
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned PrevStackSize = 0;
    unsigned NumDefCFAOffsets = 0;
    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return 0;
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //     movq %rsp, %rbp
        //  L0:
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;
        assert(MRI.getLLVMRegNum(Inst.getRegister(), true) ==
               (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!");

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //     pushq %rbp
        //  L0:
        //     .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //     subq $72, %rsp
        //  L0:
        //     .cfi_def_cfa_offset 80
        //
        PrevStackSize = StackSize;
        StackSize = std::abs(Inst.getOffset()) / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     pushq %r15
        //     pushq %r14
        //     pushq %rbx
        //  L0:
        //     subq $120, %rsp
        //  L1:
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize;
        break;
      }
      }
    }
    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      // If the amount of the stack allocation is the size of a register, then
      // we "push" the RAX/EAX register onto the stack instead of adjusting the
      // stack pointer with a SUB instruction. We don't support the push of the
      // RAX/EAX register with compact unwind. So we check for that situation
      // here.
      if ((NumDefCFAOffsets == SavedRegIdx + 1 &&
           StackSize - PrevStackSize == 1) ||
          (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2))
        return CU::UNWIND_MODE_DWARF;
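      // (StackSize is counted in StackDivide units, so a delta of exactly 1
      // corresponds to one register-sized slot -- the footprint of a push.)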
      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }
      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }
private:
  /// \brief Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const uint16_t CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const uint16_t CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }
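  // (The 1-based numbering above is meant to match the UNWIND_X86[_64]_REG_*
  // values in Apple's compact_unwind_encoding.h, where 0 means "no register".)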
  /// \brief Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3-bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3-bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }
  /// \brief Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }
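    // The renumbered values form a mixed-radix ("factorial base") number:
    // with k registers there are 6!/(6-k)! distinct orderings, so digit
    // weights of 120, 24, 6, 2, 1 suffice to pack any ordering of six
    // registers into 10 bits (6! = 720 < 1024).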
    // Take the renumbered values and encode them into a 10-bit number.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +     RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             +    RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }
public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU,
                      bool Is64Bit)
    : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
    PushInstrSize = 1;
  }
};
class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
public:
  DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU)
    : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
                                     MachO::CPU_TYPE_I386,
                                     MachO::CPU_SUBTYPE_I386_ALL);
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  virtual uint32_t
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};
class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
  const MachO::CPUSubTypeX86 Subtype;
public:
  DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU,
                         MachO::CPUSubTypeX86 st)
    : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU),
      Subtype(st) {
    HasReliableSymbolDifference = true;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
                                     MachO::CPU_TYPE_X86_64, Subtype);
  }
  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    // Temporary labels in the string literals sections require symbols. The
    // issue is that the x86_64 relocation format does not allow symbol +
    // offset, and so the linker does not have enough information to resolve
    // the access to the appropriate atom unless an external relocation is
    // used. For non-cstring sections, we expect the compiler to use a
    // non-temporary label for anything that could have an addend pointing
    // outside the symbol.
    //
    // See <rdar://problem/4765733>.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
  }
  virtual bool isSectionAtomizable(const MCSection &Section) const {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    // Fixed sized data sections are uniqued, they cannot be diced into atoms.
    switch (SMO.getType()) {
    default:
      return true;

    case MCSectionMachO::S_4BYTE_LITERALS:
    case MCSectionMachO::S_8BYTE_LITERALS:
    case MCSectionMachO::S_16BYTE_LITERALS:
    case MCSectionMachO::S_LITERAL_POINTERS:
    case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS:
    case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS:
    case MCSectionMachO::S_INTERPOSING:
      return false;
    }
  }
  /// \brief Generate the compact unwind encoding for the CFI instructions.
  virtual uint32_t
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};
} // end anonymous namespace
MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);
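  // Compact unwind is only emitted when targeting Mac OS X 10.7 or later
  // (the SupportsCU argument below); earlier targets fall back to DWARF.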
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86_32AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7));
  if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
    return new WindowsX86AsmBackend(T, false, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_32AsmBackend(T, OSABI, CPU);
}
MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);
  if (TheTriple.isOSBinFormatMachO()) {
    MachO::CPUSubTypeX86 CS =
        StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
            .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
            .Default(MachO::CPU_SUBTYPE_X86_64_ALL);
    return new DarwinX86_64AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7), CS);
  }
  if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
    return new WindowsX86AsmBackend(T, true, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_64AsmBackend(T, OSABI, CPU);
}