1 //===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/X86BaseInfo.h"
11 #include "X86AsmInstrumentation.h"
12 #include "X86Operand.h"
13 #include "X86RegisterInfo.h"
14 #include "llvm/ADT/StringExtras.h"
15 #include "llvm/ADT/Triple.h"
16 #include "llvm/CodeGen/MachineValueType.h"
17 #include "llvm/MC/MCAsmInfo.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCInst.h"
20 #include "llvm/MC/MCInstBuilder.h"
21 #include "llvm/MC/MCInstrInfo.h"
22 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
23 #include "llvm/MC/MCStreamer.h"
24 #include "llvm/MC/MCSubtargetInfo.h"
25 #include "llvm/MC/MCTargetAsmParser.h"
26 #include "llvm/MC/MCTargetOptions.h"
27 #include "llvm/Support/CommandLine.h"
32 // Following comment describes how assembly instrumentation works.
33 // Currently we have only AddressSanitizer instrumentation, but we're
34 // planning to implement MemorySanitizer for inline assembly too. If
35 // you're not familiar with AddressSanitizer algorithm, please, read
36 // https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm.
38 // When inline assembly is parsed by an instance of X86AsmParser, all
39 // instructions are emitted via EmitInstruction method. That's the
40 // place where X86AsmInstrumentation analyzes an instruction and
41 // decides, whether the instruction should be emitted as is or
42 // instrumentation is required. The latter case happens when an
43 // instruction reads from or writes to memory. Now instruction opcode
44 // is explicitly checked, and if an instruction has a memory operand
45 // (for instance, movq (%rsi, %rcx, 8), %rax) - it should be
// instrumented. There also exist instructions that modify
// memory but don't have explicit memory operands, for instance,
50 // Let's consider at first 8-byte memory accesses when an instruction
51 // has an explicit memory operand. In this case we need two registers -
52 // AddressReg to compute address of a memory cells which are accessed
53 // and ShadowReg to compute corresponding shadow address. So, we need
54 // to spill both registers before instrumentation code and restore them
55 // after instrumentation. Thus, in general, instrumentation code will
57 // PUSHF # Store flags, otherwise they will be overwritten
58 // PUSH AddressReg # spill AddressReg
59 // PUSH ShadowReg # spill ShadowReg
60 // LEA MemOp, AddressReg # compute address of the memory operand
61 // MOV AddressReg, ShadowReg
63 // # ShadowOffset(AddressReg >> 3) contains address of a shadow
64 // # corresponding to MemOp.
65 // CMP ShadowOffset(ShadowReg), 0 # test shadow value
66 // JZ .Done # when shadow equals to zero, everything is fine
67 // MOV AddressReg, RDI
68 // # Call __asan_report function with AddressReg as an argument
71 // POP ShadowReg # Restore ShadowReg
72 // POP AddressReg # Restore AddressReg
73 // POPF # Restore flags
75 // Memory accesses with different size (1-, 2-, 4- and 16-byte) are
76 // handled in a similar manner, but small memory accesses (less than 8
77 // byte) require an additional ScratchReg, which is used for shadow value.
79 // If, suppose, we're instrumenting an instruction like movs, only
80 // contents of RDI, RDI + AccessSize * RCX, RSI, RSI + AccessSize *
// RCX are checked. In this case there's no need to spill and restore
// AddressReg, ShadowReg or flags four times, they're saved on stack
83 // just once, before instrumentation of these four addresses, and restored
84 // at the end of the instrumentation.
86 // There exist several things which complicate this simple algorithm.
87 // * Instrumented memory operand can have RSP as a base or an index
88 // register. So we need to add a constant offset before computation
89 // of memory address, since flags, AddressReg, ShadowReg, etc. were
90 // already stored on stack and RSP was modified.
91 // * Debug info (usually, DWARF) should be adjusted, because sometimes
92 // RSP is used as a frame register. So, we need to select some
// register as a frame register and temporarily override the current CFA
99 static cl::opt<bool> ClAsanInstrumentAssembly(
100 "asan-instrument-assembly",
101 cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
104 const int64_t MinAllowedDisplacement = std::numeric_limits<int32_t>::min();
105 const int64_t MaxAllowedDisplacement = std::numeric_limits<int32_t>::max();
107 int64_t ApplyDisplacementBounds(int64_t Displacement) {
108 return std::max(std::min(MaxAllowedDisplacement, Displacement),
109 MinAllowedDisplacement);
112 void CheckDisplacementBounds(int64_t Displacement) {
113 assert(Displacement >= MinAllowedDisplacement &&
114 Displacement <= MaxAllowedDisplacement);
117 bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; }
119 bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
121 std::string FuncName(unsigned AccessSize, bool IsWrite) {
122 return std::string("__asan_report_") + (IsWrite ? "store" : "load") +
126 class X86AddressSanitizer : public X86AsmInstrumentation {
128 struct RegisterContext {
131 REG_OFFSET_ADDRESS = 0,
137 RegisterContext(unsigned AddressReg, unsigned ShadowReg,
138 unsigned ScratchReg) {
139 BusyRegs.push_back(convReg(AddressReg, MVT::i64));
140 BusyRegs.push_back(convReg(ShadowReg, MVT::i64));
141 BusyRegs.push_back(convReg(ScratchReg, MVT::i64));
144 unsigned AddressReg(MVT::SimpleValueType VT) const {
145 return convReg(BusyRegs[REG_OFFSET_ADDRESS], VT);
148 unsigned ShadowReg(MVT::SimpleValueType VT) const {
149 return convReg(BusyRegs[REG_OFFSET_SHADOW], VT);
152 unsigned ScratchReg(MVT::SimpleValueType VT) const {
153 return convReg(BusyRegs[REG_OFFSET_SCRATCH], VT);
156 void AddBusyReg(unsigned Reg) {
157 if (Reg != X86::NoRegister)
158 BusyRegs.push_back(convReg(Reg, MVT::i64));
161 void AddBusyRegs(const X86Operand &Op) {
162 AddBusyReg(Op.getMemBaseReg());
163 AddBusyReg(Op.getMemIndexReg());
166 unsigned ChooseFrameReg(MVT::SimpleValueType VT) const {
167 static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
168 X86::RCX, X86::RDX, X86::RDI,
170 for (unsigned Reg : Candidates) {
171 if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
172 return convReg(Reg, VT);
174 return X86::NoRegister;
178 unsigned convReg(unsigned Reg, MVT::SimpleValueType VT) const {
179 return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, VT);
182 std::vector<unsigned> BusyRegs;
185 X86AddressSanitizer(const MCSubtargetInfo *&STI)
186 : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}
188 ~X86AddressSanitizer() override {}
190 // X86AsmInstrumentation implementation:
191 void InstrumentAndEmitInstruction(const MCInst &Inst,
192 OperandVector &Operands,
194 const MCInstrInfo &MII,
195 MCStreamer &Out) override {
196 InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
198 EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));
200 InstrumentMOV(Inst, Operands, Ctx, MII, Out);
202 RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX);
204 EmitInstruction(Out, Inst);
207 // Adjusts up stack and saves all registers used in instrumentation.
208 virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
210 MCStreamer &Out) = 0;
212 // Restores all registers used in instrumentation and adjusts stack.
213 virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
215 MCStreamer &Out) = 0;
217 virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
219 const RegisterContext &RegCtx,
220 MCContext &Ctx, MCStreamer &Out) = 0;
221 virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
223 const RegisterContext &RegCtx,
224 MCContext &Ctx, MCStreamer &Out) = 0;
226 virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
227 MCStreamer &Out) = 0;
229 void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite,
230 const RegisterContext &RegCtx, MCContext &Ctx,
232 void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg,
233 unsigned AccessSize, MCContext &Ctx, MCStreamer &Out);
235 void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands,
236 MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
237 void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
238 MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
241 void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }
243 void EmitLEA(X86Operand &Op, MVT::SimpleValueType VT, unsigned Reg,
245 assert(VT == MVT::i32 || VT == MVT::i64);
247 Inst.setOpcode(VT == MVT::i32 ? X86::LEA32r : X86::LEA64r);
248 Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, VT)));
249 Op.addMemOperands(Inst, 5);
250 EmitInstruction(Out, Inst);
253 void ComputeMemOperandAddress(X86Operand &Op, MVT::SimpleValueType VT,
254 unsigned Reg, MCContext &Ctx, MCStreamer &Out);
256 // Creates new memory operand with Displacement added to an original
257 // displacement. Residue will contain a residue which could happen when the
258 // total displacement exceeds 32-bit limitation.
259 std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
260 int64_t Displacement,
261 MCContext &Ctx, int64_t *Residue);
263 bool is64BitMode() const {
264 return STI->getFeatureBits()[X86::Mode64Bit];
266 bool is32BitMode() const {
267 return STI->getFeatureBits()[X86::Mode32Bit];
269 bool is16BitMode() const {
270 return STI->getFeatureBits()[X86::Mode16Bit];
273 unsigned getPointerWidth() {
274 if (is16BitMode()) return 16;
275 if (is32BitMode()) return 32;
276 if (is64BitMode()) return 64;
277 llvm_unreachable("invalid mode");
280 // True when previous instruction was actually REP prefix.
283 // Offset from the original SP register.
284 int64_t OrigSPOffset;
287 void X86AddressSanitizer::InstrumentMemOperand(
288 X86Operand &Op, unsigned AccessSize, bool IsWrite,
289 const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
290 assert(Op.isMem() && "Op should be a memory operand.");
291 assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
292 "AccessSize should be a power of two, less or equal than 16.");
293 // FIXME: take into account load/store alignment.
294 if (IsSmallMemAccess(AccessSize))
295 InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
297 InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
300 void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
303 MCContext &Ctx, MCStreamer &Out) {
304 // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
305 // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
306 RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */,
307 IsSmallMemAccess(AccessSize)
309 : X86::NoRegister /* ScratchReg */);
310 RegCtx.AddBusyReg(DstReg);
311 RegCtx.AddBusyReg(SrcReg);
312 RegCtx.AddBusyReg(CntReg);
314 InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
318 const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
319 std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
320 getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
321 InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
325 // Test -1(%SrcReg, %CntReg, AccessSize)
327 const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
328 std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
329 getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
331 InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
337 const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
338 std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
339 getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
340 InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
343 // Test -1(%DstReg, %CntReg, AccessSize)
345 const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
346 std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
347 getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
349 InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
352 InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
355 void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst,
356 OperandVector &Operands,
357 MCContext &Ctx, const MCInstrInfo &MII,
359 // Access size in bytes.
360 unsigned AccessSize = 0;
362 switch (Inst.getOpcode()) {
379 InstrumentMOVSImpl(AccessSize, Ctx, Out);
382 void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
383 OperandVector &Operands, MCContext &Ctx,
384 const MCInstrInfo &MII,
386 // Access size in bytes.
387 unsigned AccessSize = 0;
389 switch (Inst.getOpcode()) {
420 const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();
422 for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
423 assert(Operands[Ix]);
424 MCParsedAsmOperand &Op = *Operands[Ix];
426 X86Operand &MemOp = static_cast<X86Operand &>(Op);
427 RegisterContext RegCtx(
428 X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */,
429 IsSmallMemAccess(AccessSize) ? X86::RCX
430 : X86::NoRegister /* ScratchReg */);
431 RegCtx.AddBusyRegs(MemOp);
432 InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
433 InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
434 InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
439 void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
440 MVT::SimpleValueType VT,
441 unsigned Reg, MCContext &Ctx,
443 int64_t Displacement = 0;
444 if (IsStackReg(Op.getMemBaseReg()))
445 Displacement -= OrigSPOffset;
446 if (IsStackReg(Op.getMemIndexReg()))
447 Displacement -= OrigSPOffset * Op.getMemScale();
449 assert(Displacement >= 0);
452 if (Displacement == 0) {
453 EmitLEA(Op, VT, Reg, Out);
458 std::unique_ptr<X86Operand> NewOp =
459 AddDisplacement(Op, Displacement, Ctx, &Residue);
460 EmitLEA(*NewOp, VT, Reg, Out);
462 while (Residue != 0) {
463 const MCConstantExpr *Disp =
464 MCConstantExpr::create(ApplyDisplacementBounds(Residue), Ctx);
465 std::unique_ptr<X86Operand> DispOp =
466 X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
468 EmitLEA(*DispOp, VT, Reg, Out);
469 Residue -= Disp->getValue();
473 std::unique_ptr<X86Operand>
474 X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
475 MCContext &Ctx, int64_t *Residue) {
476 assert(Displacement >= 0);
478 if (Displacement == 0 ||
479 (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
480 *Residue = Displacement;
481 return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
482 Op.getMemDisp(), Op.getMemBaseReg(),
483 Op.getMemIndexReg(), Op.getMemScale(),
487 int64_t OrigDisplacement =
488 static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
489 CheckDisplacementBounds(OrigDisplacement);
490 Displacement += OrigDisplacement;
492 int64_t NewDisplacement = ApplyDisplacementBounds(Displacement);
493 CheckDisplacementBounds(NewDisplacement);
495 *Residue = Displacement - NewDisplacement;
496 const MCExpr *Disp = MCConstantExpr::create(NewDisplacement, Ctx);
497 return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
498 Op.getMemBaseReg(), Op.getMemIndexReg(),
499 Op.getMemScale(), SMLoc(), SMLoc());
502 class X86AddressSanitizer32 : public X86AddressSanitizer {
504 static const long kShadowOffset = 0x20000000;
506 X86AddressSanitizer32(const MCSubtargetInfo *&STI)
507 : X86AddressSanitizer(STI) {}
509 ~X86AddressSanitizer32() override {}
511 unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
512 unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
513 if (FrameReg == X86::NoRegister)
515 return getX86SubSuperRegister(FrameReg, MVT::i32);
518 void SpillReg(MCStreamer &Out, unsigned Reg) {
519 EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));
523 void RestoreReg(MCStreamer &Out, unsigned Reg) {
524 EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));
528 void StoreFlags(MCStreamer &Out) {
529 EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
533 void RestoreFlags(MCStreamer &Out) {
534 EmitInstruction(Out, MCInstBuilder(X86::POPF32));
538 void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
540 MCStreamer &Out) override {
541 unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
542 assert(LocalFrameReg != X86::NoRegister);
544 const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
545 unsigned FrameReg = GetFrameReg(Ctx, Out);
546 if (MRI && FrameReg != X86::NoRegister) {
547 SpillReg(Out, LocalFrameReg);
548 if (FrameReg == X86::ESP) {
549 Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
550 Out.EmitCFIRelOffset(
551 MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
555 MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg));
556 Out.EmitCFIRememberState();
557 Out.EmitCFIDefCfaRegister(
558 MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
561 SpillReg(Out, RegCtx.AddressReg(MVT::i32));
562 SpillReg(Out, RegCtx.ShadowReg(MVT::i32));
563 if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
564 SpillReg(Out, RegCtx.ScratchReg(MVT::i32));
568 void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
570 MCStreamer &Out) override {
571 unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
572 assert(LocalFrameReg != X86::NoRegister);
575 if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
576 RestoreReg(Out, RegCtx.ScratchReg(MVT::i32));
577 RestoreReg(Out, RegCtx.ShadowReg(MVT::i32));
578 RestoreReg(Out, RegCtx.AddressReg(MVT::i32));
580 unsigned FrameReg = GetFrameReg(Ctx, Out);
581 if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
582 RestoreReg(Out, LocalFrameReg);
583 Out.EmitCFIRestoreState();
584 if (FrameReg == X86::ESP)
585 Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);
589 void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
591 const RegisterContext &RegCtx,
593 MCStreamer &Out) override;
594 void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
596 const RegisterContext &RegCtx,
598 MCStreamer &Out) override;
599 void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
600 MCStreamer &Out) override;
603 void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
604 MCStreamer &Out, const RegisterContext &RegCtx) {
605 EmitInstruction(Out, MCInstBuilder(X86::CLD));
606 EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
608 EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
613 Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(MVT::i32)));
615 const std::string &Fn = FuncName(AccessSize, IsWrite);
616 MCSymbol *FnSym = Ctx.getOrCreateSymbol(StringRef(Fn));
617 const MCSymbolRefExpr *FnExpr =
618 MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
619 EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
623 void X86AddressSanitizer32::InstrumentMemOperandSmall(
624 X86Operand &Op, unsigned AccessSize, bool IsWrite,
625 const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
626 unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
627 unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
628 unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);
630 assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
631 unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);
633 ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);
635 EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
637 EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
638 .addReg(ShadowRegI32)
639 .addReg(ShadowRegI32)
644 Inst.setOpcode(X86::MOV8rm);
645 Inst.addOperand(MCOperand::createReg(ShadowRegI8));
646 const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
647 std::unique_ptr<X86Operand> Op(
648 X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
650 Op->addMemOperands(Inst, 5);
651 EmitInstruction(Out, Inst);
655 Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
656 MCSymbol *DoneSym = Ctx.createTempSymbol();
657 const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
658 EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
660 EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
662 EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
663 .addReg(ScratchRegI32)
664 .addReg(ScratchRegI32)
667 switch (AccessSize) {
668 default: llvm_unreachable("Incorrect access size");
672 const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
673 std::unique_ptr<X86Operand> Op(
674 X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
676 EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
680 EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
681 .addReg(ScratchRegI32)
682 .addReg(ScratchRegI32)
689 MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
690 EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
692 EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
694 EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
695 EmitLabel(Out, DoneSym);
698 void X86AddressSanitizer32::InstrumentMemOperandLarge(
699 X86Operand &Op, unsigned AccessSize, bool IsWrite,
700 const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
701 unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
702 unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
704 ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);
706 EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
708 EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
709 .addReg(ShadowRegI32)
710 .addReg(ShadowRegI32)
714 switch (AccessSize) {
715 default: llvm_unreachable("Incorrect access size");
717 Inst.setOpcode(X86::CMP8mi);
720 Inst.setOpcode(X86::CMP16mi);
723 const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
724 std::unique_ptr<X86Operand> Op(
725 X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
727 Op->addMemOperands(Inst, 5);
728 Inst.addOperand(MCOperand::createImm(0));
729 EmitInstruction(Out, Inst);
731 MCSymbol *DoneSym = Ctx.createTempSymbol();
732 const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
733 EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
735 EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
736 EmitLabel(Out, DoneSym);
739 void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
744 // No need to test when ECX is equals to zero.
745 MCSymbol *DoneSym = Ctx.createTempSymbol();
746 const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
748 Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
749 EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
751 // Instrument first and last elements in src and dst range.
752 InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
753 X86::ECX /* CntReg */, AccessSize, Ctx, Out);
755 EmitLabel(Out, DoneSym);
759 class X86AddressSanitizer64 : public X86AddressSanitizer {
761 static const long kShadowOffset = 0x7fff8000;
763 X86AddressSanitizer64(const MCSubtargetInfo *&STI)
764 : X86AddressSanitizer(STI) {}
766 ~X86AddressSanitizer64() override {}
768 unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
769 unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
770 if (FrameReg == X86::NoRegister)
772 return getX86SubSuperRegister(FrameReg, MVT::i64);
775 void SpillReg(MCStreamer &Out, unsigned Reg) {
776 EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));
780 void RestoreReg(MCStreamer &Out, unsigned Reg) {
781 EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));
785 void StoreFlags(MCStreamer &Out) {
786 EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
790 void RestoreFlags(MCStreamer &Out) {
791 EmitInstruction(Out, MCInstBuilder(X86::POPF64));
795 void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
797 MCStreamer &Out) override {
798 unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
799 assert(LocalFrameReg != X86::NoRegister);
801 const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
802 unsigned FrameReg = GetFrameReg(Ctx, Out);
803 if (MRI && FrameReg != X86::NoRegister) {
804 SpillReg(Out, X86::RBP);
805 if (FrameReg == X86::RSP) {
806 Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
807 Out.EmitCFIRelOffset(
808 MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
812 MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg));
813 Out.EmitCFIRememberState();
814 Out.EmitCFIDefCfaRegister(
815 MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
818 EmitAdjustRSP(Ctx, Out, -128);
819 SpillReg(Out, RegCtx.ShadowReg(MVT::i64));
820 SpillReg(Out, RegCtx.AddressReg(MVT::i64));
821 if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
822 SpillReg(Out, RegCtx.ScratchReg(MVT::i64));
826 void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
828 MCStreamer &Out) override {
829 unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
830 assert(LocalFrameReg != X86::NoRegister);
833 if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
834 RestoreReg(Out, RegCtx.ScratchReg(MVT::i64));
835 RestoreReg(Out, RegCtx.AddressReg(MVT::i64));
836 RestoreReg(Out, RegCtx.ShadowReg(MVT::i64));
837 EmitAdjustRSP(Ctx, Out, 128);
839 unsigned FrameReg = GetFrameReg(Ctx, Out);
840 if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
841 RestoreReg(Out, LocalFrameReg);
842 Out.EmitCFIRestoreState();
843 if (FrameReg == X86::RSP)
844 Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);
848 void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
850 const RegisterContext &RegCtx,
852 MCStreamer &Out) override;
853 void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
855 const RegisterContext &RegCtx,
857 MCStreamer &Out) override;
858 void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
859 MCStreamer &Out) override;
862 void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
863 const MCExpr *Disp = MCConstantExpr::create(Offset, Ctx);
864 std::unique_ptr<X86Operand> Op(
865 X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
867 EmitLEA(*Op, MVT::i64, X86::RSP, Out);
868 OrigSPOffset += Offset;
871 void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
872 MCStreamer &Out, const RegisterContext &RegCtx) {
873 EmitInstruction(Out, MCInstBuilder(X86::CLD));
874 EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
876 EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
881 if (RegCtx.AddressReg(MVT::i64) != X86::RDI) {
882 EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
883 RegCtx.AddressReg(MVT::i64)));
885 const std::string &Fn = FuncName(AccessSize, IsWrite);
886 MCSymbol *FnSym = Ctx.getOrCreateSymbol(StringRef(Fn));
887 const MCSymbolRefExpr *FnExpr =
888 MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
889 EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
893 void X86AddressSanitizer64::InstrumentMemOperandSmall(
894 X86Operand &Op, unsigned AccessSize, bool IsWrite,
895 const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
896 unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
897 unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
898 unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
899 unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
900 unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);
902 assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
903 unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);
905 ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
907 EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
909 EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
910 .addReg(ShadowRegI64)
911 .addReg(ShadowRegI64)
915 Inst.setOpcode(X86::MOV8rm);
916 Inst.addOperand(MCOperand::createReg(ShadowRegI8));
917 const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
918 std::unique_ptr<X86Operand> Op(
919 X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
921 Op->addMemOperands(Inst, 5);
922 EmitInstruction(Out, Inst);
926 Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
927 MCSymbol *DoneSym = Ctx.createTempSymbol();
928 const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
929 EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
931 EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
933 EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
934 .addReg(ScratchRegI32)
935 .addReg(ScratchRegI32)
938 switch (AccessSize) {
939 default: llvm_unreachable("Incorrect access size");
943 const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
944 std::unique_ptr<X86Operand> Op(
945 X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
947 EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
951 EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
952 .addReg(ScratchRegI32)
953 .addReg(ScratchRegI32)
960 MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
961 EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
963 EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
965 EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
966 EmitLabel(Out, DoneSym);
969 void X86AddressSanitizer64::InstrumentMemOperandLarge(
970 X86Operand &Op, unsigned AccessSize, bool IsWrite,
971 const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
972 unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
973 unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
975 ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
977 EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
979 EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
980 .addReg(ShadowRegI64)
981 .addReg(ShadowRegI64)
985 switch (AccessSize) {
986 default: llvm_unreachable("Incorrect access size");
988 Inst.setOpcode(X86::CMP8mi);
991 Inst.setOpcode(X86::CMP16mi);
994 const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
995 std::unique_ptr<X86Operand> Op(
996 X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
998 Op->addMemOperands(Inst, 5);
999 Inst.addOperand(MCOperand::createImm(0));
1000 EmitInstruction(Out, Inst);
1003 MCSymbol *DoneSym = Ctx.createTempSymbol();
1004 const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
1005 EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
1007 EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
1008 EmitLabel(Out, DoneSym);
1011 void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
1016 // No need to test when RCX is equals to zero.
1017 MCSymbol *DoneSym = Ctx.createTempSymbol();
1018 const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
1020 Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
1021 EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
1023 // Instrument first and last elements in src and dst range.
1024 InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
1025 X86::RCX /* CntReg */, AccessSize, Ctx, Out);
1027 EmitLabel(Out, DoneSym);
1031 } // End anonymous namespace
1033 X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo *&STI)
1034 : STI(STI), InitialFrameReg(0) {}
1036 X86AsmInstrumentation::~X86AsmInstrumentation() {}
1038 void X86AsmInstrumentation::InstrumentAndEmitInstruction(
1039 const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
1040 const MCInstrInfo &MII, MCStreamer &Out) {
1041 EmitInstruction(Out, Inst);
1044 void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out,
1045 const MCInst &Inst) {
1046 Out.EmitInstruction(Inst, *STI);
1049 unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
1051 if (!Out.getNumFrameInfos()) // No active dwarf frame
1052 return X86::NoRegister;
1053 const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back();
1054 if (Frame.End) // Active dwarf frame is closed
1055 return X86::NoRegister;
1056 const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
1057 if (!MRI) // No register info
1058 return X86::NoRegister;
1060 if (InitialFrameReg) {
1061 // FrameReg is set explicitly, we're instrumenting a MachineFunction.
1062 return InitialFrameReg;
1065 return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */);
1068 X86AsmInstrumentation *
1069 CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
1070 const MCContext &Ctx, const MCSubtargetInfo *&STI) {
1071 Triple T(STI->getTargetTriple());
1072 const bool hasCompilerRTSupport = T.isOSLinux();
1073 if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
1074 MCOptions.SanitizeAddress) {
1075 if (STI->getFeatureBits()[X86::Mode32Bit] != 0)
1076 return new X86AddressSanitizer32(STI);
1077 if (STI->getFeatureBits()[X86::Mode64Bit] != 0)
1078 return new X86AddressSanitizer64(STI);
1080 return new X86AsmInstrumentation(STI);
1083 } // end llvm namespace