1 //===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/X86BaseInfo.h"
11 #include "X86AsmInstrumentation.h"
12 #include "X86Operand.h"
13 #include "X86RegisterInfo.h"
14 #include "llvm/ADT/StringExtras.h"
15 #include "llvm/ADT/Triple.h"
16 #include "llvm/MC/MCAsmInfo.h"
17 #include "llvm/MC/MCContext.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCInstBuilder.h"
20 #include "llvm/MC/MCInstrInfo.h"
21 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
22 #include "llvm/MC/MCStreamer.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/MC/MCTargetAsmParser.h"
25 #include "llvm/MC/MCTargetOptions.h"
26 #include "llvm/Support/CommandLine.h"
31 // Following comment describes how assembly instrumentation works.
32 // Currently we have only AddressSanitizer instrumentation, but we're
33 // planning to implement MemorySanitizer for inline assembly too. If
34 // you're not familiar with AddressSanitizer algorithm, please, read
35 // https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm.
37 // When inline assembly is parsed by an instance of X86AsmParser, all
38 // instructions are emitted via EmitInstruction method. That's the
39 // place where X86AsmInstrumentation analyzes an instruction and
40 // decides, whether the instruction should be emitted as is or
41 // instrumentation is required. The latter case happens when an
42 // instruction reads from or writes to memory. Now instruction opcode
43 // is explicitly checked, and if an instruction has a memory operand
44 // (for instance, movq (%rsi, %rcx, 8), %rax) - it should be
// instrumented. There also exist instructions that modify
// memory but don't have explicit memory operands, for instance,
49 // Let's consider at first 8-byte memory accesses when an instruction
50 // has an explicit memory operand. In this case we need two registers -
51 // AddressReg to compute address of a memory cells which are accessed
52 // and ShadowReg to compute corresponding shadow address. So, we need
53 // to spill both registers before instrumentation code and restore them
54 // after instrumentation. Thus, in general, instrumentation code will
56 // PUSHF # Store flags, otherwise they will be overwritten
57 // PUSH AddressReg # spill AddressReg
58 // PUSH ShadowReg # spill ShadowReg
59 // LEA MemOp, AddressReg # compute address of the memory operand
60 // MOV AddressReg, ShadowReg
62 // # ShadowOffset(AddressReg >> 3) contains address of a shadow
63 // # corresponding to MemOp.
64 // CMP ShadowOffset(ShadowReg), 0 # test shadow value
// JZ .Done # when the shadow value equals zero, everything is fine
66 // MOV AddressReg, RDI
67 // # Call __asan_report function with AddressReg as an argument
70 // POP ShadowReg # Restore ShadowReg
71 // POP AddressReg # Restore AddressReg
72 // POPF # Restore flags
74 // Memory accesses with different size (1-, 2-, 4- and 16-byte) are
75 // handled in a similar manner, but small memory accesses (less than 8
76 // byte) require an additional ScratchReg, which is used for shadow value.
78 // If, suppose, we're instrumenting an instruction like movs, only
79 // contents of RDI, RDI + AccessSize * RCX, RSI, RSI + AccessSize *
// RCX are checked. In this case there's no need to spill and restore
// AddressReg, ShadowReg or flags four times; they're saved on stack
82 // just once, before instrumentation of these four addresses, and restored
83 // at the end of the instrumentation.
85 // There exist several things which complicate this simple algorithm.
86 // * Instrumented memory operand can have RSP as a base or an index
87 // register. So we need to add a constant offset before computation
88 // of memory address, since flags, AddressReg, ShadowReg, etc. were
89 // already stored on stack and RSP was modified.
90 // * Debug info (usually, DWARF) should be adjusted, because sometimes
91 // RSP is used as a frame register. So, we need to select some
// register as a frame register and temporarily override current CFA
// Command-line switch gating ASan instrumentation of inline assembly.
// NOTE(review): the trailing constructor arguments (e.g. a cl::init default)
// are not visible in this excerpt — confirm against the full file.
static cl::opt<bool> ClAsanInstrumentAssembly(
    "asan-instrument-assembly",
    cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
// X86 memory-operand displacements are encoded as signed 32-bit immediates,
// so any displacement we synthesize must stay within [INT32_MIN, INT32_MAX].
const int64_t MinAllowedDisplacement = std::numeric_limits<int32_t>::min();
const int64_t MaxAllowedDisplacement = std::numeric_limits<int32_t>::max();

// Clamps \p Displacement into the representable signed 32-bit range.
// Values outside the range saturate at the corresponding bound.
// (Restored the function's closing brace, which was missing in this excerpt.)
int64_t ApplyDisplacementBounds(int64_t Displacement) {
  return std::max(std::min(MaxAllowedDisplacement, Displacement),
                  MinAllowedDisplacement);
}
111 void CheckDisplacementBounds(int64_t Displacement) {
112 assert(Displacement >= MinAllowedDisplacement &&
113 Displacement <= MaxAllowedDisplacement);
116 bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; }
// A "small" access is anything narrower than 8 bytes; such accesses take the
// slow path that needs an extra scratch register for the shadow value.
bool IsSmallMemAccess(unsigned AccessSize) { return !(AccessSize >= 8); }
// Common driver for ASan instrumentation of inline assembly; the 32- and
// 64-bit subclasses supply the ABI-specific spill/restore and check code.
// NOTE(review): several lines are elided in this excerpt (access specifiers,
// the enum header around REG_OFFSET_ADDRESS, some closing braces, and the
// if-guards inside InstrumentAndEmitInstruction) — consult the full file.
class X86AddressSanitizer : public X86AsmInstrumentation {
  // Tracks which registers the instrumentation sequence occupies so that a
  // still-free register can be chosen as a temporary frame register.
  struct RegisterContext {
    // Fixed slot indices into BusyRegs; slot 0 is the address register.
    // NOTE(review): the shadow/scratch enumerators are elided here.
    REG_OFFSET_ADDRESS = 0,

    // Records the three dedicated registers, widened to 64-bit form.
    RegisterContext(unsigned AddressReg, unsigned ShadowReg,
                    unsigned ScratchReg) {
      BusyRegs.push_back(convReg(AddressReg, 64));
      BusyRegs.push_back(convReg(ShadowReg, 64));
      BusyRegs.push_back(convReg(ScratchReg, 64));

    // Accessors return the dedicated register in the requested width.
    unsigned AddressReg(unsigned Size) const {
      return convReg(BusyRegs[REG_OFFSET_ADDRESS], Size);
    unsigned ShadowReg(unsigned Size) const {
      return convReg(BusyRegs[REG_OFFSET_SHADOW], Size);
    unsigned ScratchReg(unsigned Size) const {
      return convReg(BusyRegs[REG_OFFSET_SCRATCH], Size);

    // Marks an additional register (e.g. a memory operand's base/index) as
    // unavailable for frame-register selection.
    void AddBusyReg(unsigned Reg) {
      if (Reg != X86::NoRegister)
        BusyRegs.push_back(convReg(Reg, 64));

    // Marks both registers of a memory operand as busy.
    void AddBusyRegs(const X86Operand &Op) {
      AddBusyReg(Op.getMemBaseReg());
      AddBusyReg(Op.getMemIndexReg());

    // Picks the first candidate register not present in BusyRegs, returned
    // in the requested width; X86::NoRegister if all are busy.
    // NOTE(review): the candidate list appears truncated in this excerpt.
    unsigned ChooseFrameReg(unsigned Size) const {
      static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
                                              X86::RCX, X86::RDX, X86::RDI,
      for (unsigned Reg : Candidates) {
        if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
          return convReg(Reg, Size);
      return X86::NoRegister;

    // Maps \p Reg to its sub/super register of width \p Size, passing
    // NoRegister through unchanged.
    unsigned convReg(unsigned Reg, unsigned Size) const {
      return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, Size);

    // All registers the instrumentation must not clobber/choose.
    std::vector<unsigned> BusyRegs;

  X86AddressSanitizer(const MCSubtargetInfo *&STI)
      : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}

  ~X86AddressSanitizer() override {}

  // X86AsmInstrumentation implementation:
  // Entry point per parsed instruction: instrument MOVS/MOV forms, then emit
  // the original instruction.  NOTE(review): the MCContext parameter and the
  // RepPrefix if-guards around the two EmitInstruction calls are elided here.
  void InstrumentAndEmitInstruction(const MCInst &Inst,
                                    OperandVector &Operands,
                                    const MCInstrInfo &MII,
                                    MCStreamer &Out) override {
    InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
    EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));

    InstrumentMOV(Inst, Operands, Ctx, MII, Out);

    // Remember whether this instruction was a bare REP prefix so the next
    // one can be re-joined with it.
    RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX);
    EmitInstruction(Out, Inst);

  // Adjusts up stack and saves all registers used in instrumentation.
  virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                            MCStreamer &Out) = 0;

  // Restores all registers used in instrumentation and adjusts stack.
  virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                            MCStreamer &Out) = 0;

  // ABI-specific shadow checks for accesses narrower than 8 bytes…
  virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx, MCStreamer &Out) = 0;
  // …and for 8/16-byte accesses.
  virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx, MCStreamer &Out) = 0;

  virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                                  MCStreamer &Out) = 0;

  void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite,
                            const RegisterContext &RegCtx, MCContext &Ctx,
  void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg,
                          unsigned AccessSize, MCContext &Ctx, MCStreamer &Out);

  void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands,
                      MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
  void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
                     MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);

  void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }

  // Emits LEA of Op's effective address into \p Reg (32- or 64-bit only).
  void EmitLEA(X86Operand &Op, unsigned Size, unsigned Reg, MCStreamer &Out) {
    assert(Size == 32 || Size == 64);
    Inst.setOpcode(Size == 32 ? X86::LEA32r : X86::LEA64r);
    Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, Size)));
    Op.addMemOperands(Inst, 5);
    EmitInstruction(Out, Inst);

  void ComputeMemOperandAddress(X86Operand &Op, unsigned Size,
                                unsigned Reg, MCContext &Ctx, MCStreamer &Out);

  // Creates new memory operand with Displacement added to an original
  // displacement. Residue will contain a residue which could happen when the
  // total displacement exceeds 32-bit limitation.
  std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
                                              int64_t Displacement,
                                              MCContext &Ctx, int64_t *Residue);

  bool is64BitMode() const {
    return STI->getFeatureBits()[X86::Mode64Bit];
  bool is32BitMode() const {
    return STI->getFeatureBits()[X86::Mode32Bit];
  bool is16BitMode() const {
    return STI->getFeatureBits()[X86::Mode16Bit];

  // Pointer width in bits derived from the subtarget's mode feature bits.
  unsigned getPointerWidth() {
    if (is16BitMode()) return 16;
    if (is32BitMode()) return 32;
    if (is64BitMode()) return 64;
    llvm_unreachable("invalid mode");

  // True when previous instruction was actually REP prefix.
  // NOTE(review): the `bool RepPrefix;` declaration is elided here.
  // Offset from the original SP register.
  int64_t OrigSPOffset;
// Emits the shadow-memory check for one memory operand, dispatching to the
// small (<8 byte) or large (8/16 byte) code path.
void X86AddressSanitizer::InstrumentMemOperand(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  assert(Op.isMem() && "Op should be a memory operand.");
  assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
         "AccessSize should be a power of two, less or equal than 16.");

  // FIXME: take into account load/store alignment.
  if (IsSmallMemAccess(AccessSize))
    InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
  // NOTE(review): an `else` likely preceded the next call but is elided in
  // this excerpt — verify against the full file.
    InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
// Instruments a MOVS-style instruction by checking the first and last
// element of both the source and the destination ranges, sharing a single
// spill/restore prologue+epilogue across all four checks.
// NOTE(review): part of the parameter list (CntReg/AccessSize), the `?`
// branch of the scratch-register ternary, the scope braces around each
// check, and some call continuations are elided in this excerpt.
void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
                                             MCContext &Ctx, MCStreamer &Out) {
  // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
  // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
  RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */,
                         IsSmallMemAccess(AccessSize)
                             : X86::NoRegister /* ScratchReg */);
  // The MOVS operand registers themselves must stay untouched.
  RegCtx.AddBusyReg(DstReg);
  RegCtx.AddBusyReg(SrcReg);
  RegCtx.AddBusyReg(CntReg);

  InstrumentMemOperandPrologue(RegCtx, Ctx, Out);

  // Test (%SrcReg): the first source element (a read).
  const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
  std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
      getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
  InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,

  // Test -1(%SrcReg, %CntReg, AccessSize)
  const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
  std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
      getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
  InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,

  // Test (%DstReg): the first destination element (a write).
  const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
  std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
      getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
  InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);

  // Test -1(%DstReg, %CntReg, AccessSize)
  const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
  std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
      getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
  InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);

  InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
// Recognizes MOVS-family opcodes and instruments them via
// InstrumentMOVSImpl.  NOTE(review): the MCStreamer parameter, the opcode
// cases that set AccessSize, and the closing braces are elided in this
// excerpt.
void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst,
                                         OperandVector &Operands,
                                         MCContext &Ctx, const MCInstrInfo &MII,
  // Access size in bytes.
  unsigned AccessSize = 0;

  switch (Inst.getOpcode()) {

  InstrumentMOVSImpl(AccessSize, Ctx, Out);
// Instruments MOV-like instructions with explicit memory operands: for each
// memory operand, wraps a shadow check in a spill/restore prologue+epilogue.
// NOTE(review): the MCStreamer parameter, the opcode switch cases that set
// AccessSize, and the `if (Op.isMem())` guard presumably preceding the cast
// are elided in this excerpt — verify against the full file.
void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
                                        OperandVector &Operands, MCContext &Ctx,
                                        const MCInstrInfo &MII,
  // Access size in bytes.
  unsigned AccessSize = 0;

  switch (Inst.getOpcode()) {

  // Whether the access is a store, per the instruction description.
  const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();

  for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
    assert(Operands[Ix]);
    MCParsedAsmOperand &Op = *Operands[Ix];
    X86Operand &MemOp = static_cast<X86Operand &>(Op);
    // RDI is the address reg so it doubles as the __asan_report argument.
    RegisterContext RegCtx(
        X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */,
        IsSmallMemAccess(AccessSize) ? X86::RCX
                                     : X86::NoRegister /* ScratchReg */);
    RegCtx.AddBusyRegs(MemOp);
    InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
    InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
    InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
// Computes the effective address of \p Op into \p Reg via LEA, compensating
// for the stack-pointer shift introduced by the instrumentation's own spills
// (tracked in OrigSPOffset).  Oversized displacements are folded in by
// repeated bounded LEAs.  NOTE(review): the Size parameter line, a `return`
// after the Displacement==0 LEA, the Residue declaration, and closing braces
// are elided in this excerpt.
void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
                                                   unsigned Reg, MCContext &Ctx,
  int64_t Displacement = 0;
  // If SP is part of the address, undo our own adjustment of it.
  if (IsStackReg(Op.getMemBaseReg()))
    Displacement -= OrigSPOffset;
  if (IsStackReg(Op.getMemIndexReg()))
    Displacement -= OrigSPOffset * Op.getMemScale();

  // OrigSPOffset is non-positive while spilled, so the correction is >= 0.
  assert(Displacement >= 0);

  // Fast path: address fits the operand as-is.
  if (Displacement == 0) {
    EmitLEA(Op, Size, Reg, Out);

  std::unique_ptr<X86Operand> NewOp =
      AddDisplacement(Op, Displacement, Ctx, &Residue);
  EmitLEA(*NewOp, Size, Reg, Out);

  // Fold any residue that exceeded the 32-bit displacement limit with
  // additional LEAs of at most INT32_MAX each.
  while (Residue != 0) {
    const MCConstantExpr *Disp =
        MCConstantExpr::create(ApplyDisplacementBounds(Residue), Ctx);
    std::unique_ptr<X86Operand> DispOp =
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
    EmitLEA(*DispOp, Size, Reg, Out);
    Residue -= Disp->getValue();
// Returns a copy of \p Op with \p Displacement added to its displacement,
// clamped to the signed 32-bit immediate range; the excess (if any) is
// returned via \p Residue.  Non-constant displacements are left untouched
// and the whole Displacement becomes the residue.
// NOTE(review): the SMLoc() arguments of the first CreateMem call and some
// closing braces are elided in this excerpt.
std::unique_ptr<X86Operand>
X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
                                     MCContext &Ctx, int64_t *Residue) {
  assert(Displacement >= 0);

  // Nothing to fold, or symbolic displacement we cannot do arithmetic on:
  // clone the operand unchanged and report everything as residue.
  if (Displacement == 0 ||
      (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
    *Residue = Displacement;
    return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
                                 Op.getMemDisp(), Op.getMemBaseReg(),
                                 Op.getMemIndexReg(), Op.getMemScale(),

  int64_t OrigDisplacement =
      static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
  CheckDisplacementBounds(OrigDisplacement);
  Displacement += OrigDisplacement;

  // Clamp the combined displacement; the remainder goes back to the caller.
  int64_t NewDisplacement = ApplyDisplacementBounds(Displacement);
  CheckDisplacementBounds(NewDisplacement);

  *Residue = Displacement - NewDisplacement;
  const MCExpr *Disp = MCConstantExpr::create(NewDisplacement, Ctx);
  return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
                               Op.getMemBaseReg(), Op.getMemIndexReg(),
                               Op.getMemScale(), SMLoc(), SMLoc());
// 32-bit (ia32) flavor of the instrumentation.
// NOTE(review): several lines are elided in this excerpt (access specifiers,
// OrigSPOffset adjustments in the spill/restore helpers, some parameters,
// early returns and closing braces) — consult the full file.
class X86AddressSanitizer32 : public X86AddressSanitizer {
  // Shadow = (Addr >> 3) + kShadowOffset for the 32-bit ASan mapping.
  static const long kShadowOffset = 0x20000000;

  X86AddressSanitizer32(const MCSubtargetInfo *&STI)
      : X86AddressSanitizer(STI) {}

  ~X86AddressSanitizer32() override {}

  // Current DWARF frame register narrowed to its 32-bit form.
  // NOTE(review): the early `return` for the NoRegister case appears elided;
  // as written here the conversion reads as the if-body — verify.
  unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    if (FrameReg == X86::NoRegister)
    return getX86SubSuperRegister(FrameReg, 32);

  // Spill/restore via 32-bit push/pop; flags via pushf/popf.
  void SpillReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));

  void RestoreReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));

  void StoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));

  void RestoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::POPF32));

  // Saves the temporary frame register (with CFI bookkeeping) and then the
  // instrumentation's dedicated registers.
  void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                    MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
    assert(LocalFrameReg != X86::NoRegister);

    const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (MRI && FrameReg != X86::NoRegister) {
      SpillReg(Out, LocalFrameReg);
      if (FrameReg == X86::ESP) {
        // SP moved by the push above; keep the CFA description accurate.
        Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
        Out.EmitCFIRelOffset(
            MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
      // Redirect the CFA to the saved-frame copy while we mangle SP.
      MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg));
      Out.EmitCFIRememberState();
      Out.EmitCFIDefCfaRegister(
          MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));

    SpillReg(Out, RegCtx.AddressReg(32));
    SpillReg(Out, RegCtx.ShadowReg(32));
    if (RegCtx.ScratchReg(32) != X86::NoRegister)
      SpillReg(Out, RegCtx.ScratchReg(32));

  // Mirror image of the prologue: restore registers, then CFI state.
  void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                    MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
    assert(LocalFrameReg != X86::NoRegister);

    if (RegCtx.ScratchReg(32) != X86::NoRegister)
      RestoreReg(Out, RegCtx.ScratchReg(32));
    RestoreReg(Out, RegCtx.ShadowReg(32));
    RestoreReg(Out, RegCtx.AddressReg(32));

    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
      RestoreReg(Out, LocalFrameReg);
      Out.EmitCFIRestoreState();
      if (FrameReg == X86::ESP)
        Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);

  void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                 const RegisterContext &RegCtx,
                                 MCStreamer &Out) override;
  void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                 const RegisterContext &RegCtx,
                                 MCStreamer &Out) override;
  void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                          MCStreamer &Out) override;

  // Calls __asan_report_(load|store)<N>@PLT with the faulting address
  // pushed as the (stack) argument, after normalizing CPU state.
  // NOTE(review): the AND64ri8 operand list is elided in this excerpt.
  void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
                          MCStreamer &Out, const RegisterContext &RegCtx) {
    EmitInstruction(Out, MCInstBuilder(X86::CLD));
    EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));

    EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
        Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(32)));

    MCSymbol *FnSym = Ctx.getOrCreateSymbol(llvm::Twine("__asan_report_") +
                                            (IsWrite ? "store" : "load") +
                                            llvm::Twine(AccessSize));
    const MCSymbolRefExpr *FnExpr =
        MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
// Emits the ASan slow-path check for a 1/2/4-byte access (32-bit mode):
// load the shadow byte, and if non-zero, compare the access's last byte
// offset within the 8-byte granule against the shadow value.
// NOTE(review): several lines are elided in this excerpt (shift immediates,
// scope braces, switch cases, and some builder continuations).
void X86AddressSanitizer32::InstrumentMemOperandSmall(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI32 = RegCtx.AddressReg(32);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
  unsigned ShadowRegI8 = RegCtx.ShadowReg(8);

  assert(RegCtx.ScratchReg(32) != X86::NoRegister);
  unsigned ScratchRegI32 = RegCtx.ScratchReg(32);

  ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);

  // ShadowReg = AddressReg >> 3 (shift immediate elided in this excerpt).
  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
  EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
                           .addReg(ShadowRegI32)
                           .addReg(ShadowRegI32)

  // Load the shadow byte from kShadowOffset(ShadowReg).
  Inst.setOpcode(X86::MOV8rm);
  Inst.addOperand(MCOperand::createReg(ShadowRegI8));
  const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
  std::unique_ptr<X86Operand> Op(
      X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
  Op->addMemOperands(Inst, 5);
  EmitInstruction(Out, Inst);

  // Shadow byte zero => whole granule addressable => done.
  Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // Scratch = low 3 bits of the address (offset within the granule).
  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
  EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
                           .addReg(ScratchRegI32)
                           .addReg(ScratchRegI32)

  // Add AccessSize - 1 so Scratch is the offset of the access's last byte.
  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
    const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
    EmitLEA(*Op, 32, ScratchRegI32, Out);

    EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
                             .addReg(ScratchRegI32)
                             .addReg(ScratchRegI32)

  // Access is OK iff last-byte offset < shadow value (sign-extended).
  MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
  EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
// Emits the ASan fast-path check for an 8/16-byte access (32-bit mode):
// the shadow (byte for 8, word for 16) must be zero, else report.
// NOTE(review): the shift immediate, the switch-case labels/breaks and some
// scope braces are elided in this excerpt.
void X86AddressSanitizer32::InstrumentMemOperandLarge(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI32 = RegCtx.AddressReg(32);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(32);

  ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);

  // ShadowReg = AddressReg >> 3.
  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
  EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
                           .addReg(ShadowRegI32)
                           .addReg(ShadowRegI32)

  // Compare the shadow against zero: 8-byte access -> one shadow byte,
  // 16-byte access -> one shadow word.
  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
    Inst.setOpcode(X86::CMP8mi);
    Inst.setOpcode(X86::CMP16mi);
  const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
  std::unique_ptr<X86Operand> Op(
      X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
  Op->addMemOperands(Inst, 5);
  Inst.addOperand(MCOperand::createImm(0));
  EmitInstruction(Out, Inst);

  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
// MOVS instrumentation, 32-bit mode: skip entirely when ECX is zero, else
// check the endpoints of the ESI/EDI ranges.
// NOTE(review): the remaining parameters and an `EmitInstruction(` call
// continuation are elided in this excerpt.
void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
  // No need to test when ECX is equal to zero.
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
      Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // Instrument first and last elements in src and dst range.
  InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
                     X86::ECX /* CntReg */, AccessSize, Ctx, Out);

  EmitLabel(Out, DoneSym);
// 64-bit (x86_64) flavor of the instrumentation.  Additionally steps over
// the System V red zone (128 bytes below RSP) before spilling.
// NOTE(review): several lines are elided in this excerpt (access specifiers,
// OrigSPOffset adjustments in the spill/restore helpers, early returns,
// some parameters and closing braces) — consult the full file.
class X86AddressSanitizer64 : public X86AddressSanitizer {
  // Shadow = (Addr >> 3) + kShadowOffset for the 64-bit ASan mapping.
  static const long kShadowOffset = 0x7fff8000;

  X86AddressSanitizer64(const MCSubtargetInfo *&STI)
      : X86AddressSanitizer(STI) {}

  ~X86AddressSanitizer64() override {}

  // Current DWARF frame register widened to its 64-bit form.
  // NOTE(review): the early `return` for the NoRegister case appears
  // elided; verify against the full file.
  unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    if (FrameReg == X86::NoRegister)
    return getX86SubSuperRegister(FrameReg, 64);

  // Spill/restore via 64-bit push/pop; flags via pushfq/popfq.
  void SpillReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));

  void RestoreReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));

  void StoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));

  void RestoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::POPF64));

  // Saves the temporary frame register (with CFI bookkeeping), skips the
  // red zone, then spills the instrumentation's dedicated registers.
  void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                    MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
    assert(LocalFrameReg != X86::NoRegister);

    const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (MRI && FrameReg != X86::NoRegister) {
      // NOTE(review): this spills RBP, but the CFI below and the epilogue
      // operate on LocalFrameReg, which ChooseFrameReg may have resolved to
      // a different register — looks inconsistent; verify (the 32-bit
      // prologue spills LocalFrameReg here).
      SpillReg(Out, X86::RBP);
      if (FrameReg == X86::RSP) {
        Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
        Out.EmitCFIRelOffset(
            MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
      // Redirect the CFA to the saved-frame copy while we mangle RSP.
      MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg));
      Out.EmitCFIRememberState();
      Out.EmitCFIDefCfaRegister(
          MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));

    // Step over the 128-byte red zone before pushing anything.
    EmitAdjustRSP(Ctx, Out, -128);
    SpillReg(Out, RegCtx.ShadowReg(64));
    SpillReg(Out, RegCtx.AddressReg(64));
    if (RegCtx.ScratchReg(64) != X86::NoRegister)
      SpillReg(Out, RegCtx.ScratchReg(64));

  // Mirror image of the prologue: restore registers, undo the red-zone
  // adjustment, then restore the CFI state.
  void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                    MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
    assert(LocalFrameReg != X86::NoRegister);

    if (RegCtx.ScratchReg(64) != X86::NoRegister)
      RestoreReg(Out, RegCtx.ScratchReg(64));
    RestoreReg(Out, RegCtx.AddressReg(64));
    RestoreReg(Out, RegCtx.ShadowReg(64));
    EmitAdjustRSP(Ctx, Out, 128);

    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
      RestoreReg(Out, LocalFrameReg);
      Out.EmitCFIRestoreState();
      if (FrameReg == X86::RSP)
        Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);

  void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                 const RegisterContext &RegCtx,
                                 MCStreamer &Out) override;
  void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                 const RegisterContext &RegCtx,
                                 MCStreamer &Out) override;
  void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                          MCStreamer &Out) override;

  // Adjusts RSP by \p Offset via LEA (does not clobber flags) and records
  // the shift in OrigSPOffset so address computations can compensate.
  void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
    const MCExpr *Disp = MCConstantExpr::create(Offset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
    EmitLEA(*Op, 64, X86::RSP, Out);
    OrigSPOffset += Offset;

  // Calls __asan_report_(load|store)<N>@PLT with the faulting address in
  // RDI (the SysV first argument register), after normalizing CPU state.
  // NOTE(review): the AND64ri8 operand list is elided in this excerpt.
  void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
                          MCStreamer &Out, const RegisterContext &RegCtx) {
    EmitInstruction(Out, MCInstBuilder(X86::CLD));
    EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));

    EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)

    // Move the address into RDI unless it is already there.
    if (RegCtx.AddressReg(64) != X86::RDI) {
      EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
          RegCtx.AddressReg(64)));
    MCSymbol *FnSym = Ctx.getOrCreateSymbol(llvm::Twine("__asan_report_") +
                                            (IsWrite ? "store" : "load") +
                                            llvm::Twine(AccessSize));
    const MCSymbolRefExpr *FnExpr =
        MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
// Emits the ASan slow-path check for a 1/2/4-byte access (64-bit mode);
// same scheme as the 32-bit variant but with 64-bit address arithmetic.
// NOTE(review): several lines are elided in this excerpt (shift immediates,
// builder continuations, scope braces and switch cases).
void X86AddressSanitizer64::InstrumentMemOperandSmall(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI64 = RegCtx.AddressReg(64);
  unsigned AddressRegI32 = RegCtx.AddressReg(32);
  unsigned ShadowRegI64 = RegCtx.ShadowReg(64);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
  unsigned ShadowRegI8 = RegCtx.ShadowReg(8);

  assert(RegCtx.ScratchReg(32) != X86::NoRegister);
  unsigned ScratchRegI32 = RegCtx.ScratchReg(32);

  ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);

  // ShadowReg = AddressReg >> 3 (shift immediate elided in this excerpt).
  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
  EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
                           .addReg(ShadowRegI64)
                           .addReg(ShadowRegI64)

  // Load the shadow byte from kShadowOffset(ShadowReg).
  Inst.setOpcode(X86::MOV8rm);
  Inst.addOperand(MCOperand::createReg(ShadowRegI8));
  const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
  std::unique_ptr<X86Operand> Op(
      X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
  Op->addMemOperands(Inst, 5);
  EmitInstruction(Out, Inst);

  // Shadow byte zero => whole granule addressable => done.
  Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // Scratch = low 3 bits of the address (offset within the granule).
  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
  EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
                           .addReg(ScratchRegI32)
                           .addReg(ScratchRegI32)

  // Add AccessSize - 1 so Scratch is the offset of the access's last byte.
  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
    const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
    EmitLEA(*Op, 32, ScratchRegI32, Out);

    EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
                             .addReg(ScratchRegI32)
                             .addReg(ScratchRegI32)

  // Access is OK iff last-byte offset < shadow value (sign-extended).
  MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
  EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
// Emits the ASan fast-path check for an 8/16-byte access (64-bit mode):
// the shadow (byte for 8, word for 16) must be zero, else report.
// NOTE(review): the shift immediate, switch-case labels/breaks and some
// scope braces are elided in this excerpt.
void X86AddressSanitizer64::InstrumentMemOperandLarge(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI64 = RegCtx.AddressReg(64);
  unsigned ShadowRegI64 = RegCtx.ShadowReg(64);

  ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);

  // ShadowReg = AddressReg >> 3.
  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
  EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
                           .addReg(ShadowRegI64)
                           .addReg(ShadowRegI64)

  // Compare the shadow against zero: 8-byte access -> one shadow byte,
  // 16-byte access -> one shadow word.
  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
    Inst.setOpcode(X86::CMP8mi);
    Inst.setOpcode(X86::CMP16mi);
  const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
  std::unique_ptr<X86Operand> Op(
      X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
  Op->addMemOperands(Inst, 5);
  Inst.addOperand(MCOperand::createImm(0));
  EmitInstruction(Out, Inst);

  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
// MOVS instrumentation, 64-bit mode: skip entirely when RCX is zero, else
// check the endpoints of the RSI/RDI ranges.
// NOTE(review): the remaining parameters and an `EmitInstruction(` call
// continuation are elided in this excerpt.
void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
  // No need to test when RCX is equal to zero.
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
      Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // Instrument first and last elements in src and dst range.
  InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
                     X86::RCX /* CntReg */, AccessSize, Ctx, Out);

  EmitLabel(Out, DoneSym);
1026 } // End anonymous namespace
// Base-class constructor: keeps a reference to the caller-owned
// subtarget-info pointer and starts with no explicitly-set frame register.
X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo *&STI)
    : STI(STI), InitialFrameReg(0) {}
// Out-of-line destructor anchors the vtable; nothing to release.
X86AsmInstrumentation::~X86AsmInstrumentation() {}
// Default (no-sanitizer) implementation: pass the instruction through
// unmodified.  NOTE(review): the closing brace is elided in this excerpt.
void X86AsmInstrumentation::InstrumentAndEmitInstruction(
    const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
    const MCInstrInfo &MII, MCStreamer &Out) {
  EmitInstruction(Out, Inst);
// Single funnel point for emission so all instrumentation uses the same
// subtarget info.  NOTE(review): the closing brace is elided in this excerpt.
void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out,
                                            const MCInst &Inst) {
  Out.EmitInstruction(Inst, *STI);
// Determines the DWARF frame register of the currently-open frame, or
// NoRegister when there is no usable frame/register info.  An explicitly
// configured InitialFrameReg (MachineFunction instrumentation) wins over
// the frame's CFA register.  NOTE(review): the MCStreamer parameter and a
// couple of closing braces are elided in this excerpt.
unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
  if (!Out.getNumFrameInfos()) // No active dwarf frame
    return X86::NoRegister;
  const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back();
  if (Frame.End) // Active dwarf frame is closed
    return X86::NoRegister;
  const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
  if (!MRI) // No register info
    return X86::NoRegister;

  if (InitialFrameReg) {
    // FrameReg is set explicitly, we're instrumenting a MachineFunction.
    return InitialFrameReg;

  return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */);
// Factory: returns an ASan-instrumenting implementation when the flag is
// set, the target OS has compiler-rt support (Linux), and ASan is enabled
// in the MC options; otherwise the pass-through base implementation.
// Caller owns the returned object.  NOTE(review): closing braces are elided
// in this excerpt.
X86AsmInstrumentation *
CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
                            const MCContext &Ctx, const MCSubtargetInfo *&STI) {
  Triple T(STI->getTargetTriple());
  const bool hasCompilerRTSupport = T.isOSLinux();
  if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
      MCOptions.SanitizeAddress) {
    if (STI->getFeatureBits()[X86::Mode32Bit] != 0)
      return new X86AddressSanitizer32(STI);
    if (STI->getFeatureBits()[X86::Mode64Bit] != 0)
      return new X86AddressSanitizer64(STI);
  return new X86AsmInstrumentation(STI);
1078 } // end llvm namespace