X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FAsmParser%2FX86AsmInstrumentation.cpp;h=543af8e4e77dd70c99611fd45f8d226822928193;hb=2b5910a7675ddb005e72b0536499a2b983813dff;hp=a365f62190d2b1a89685a1977f63c7359919a038;hpb=5ba71b01d8dcd4f3bf0a17413c4ebbd27b2b3730;p=oota-llvm.git diff --git a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp index a365f62190d..543af8e4e77 100644 --- a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp +++ b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp @@ -10,9 +10,12 @@ #include "MCTargetDesc/X86BaseInfo.h" #include "X86AsmInstrumentation.h" #include "X86Operand.h" +#include "X86RegisterInfo.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" +#include "llvm/CodeGen/MachineValueType.h" #include "llvm/IR/Function.h" +#include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstBuilder.h" @@ -23,6 +26,73 @@ #include "llvm/MC/MCTargetAsmParser.h" #include "llvm/MC/MCTargetOptions.h" #include "llvm/Support/CommandLine.h" +#include +#include +#include + +// Following comment describes how assembly instrumentation works. +// Currently we have only AddressSanitizer instrumentation, but we're +// planning to implement MemorySanitizer for inline assembly too. If +// you're not familiar with AddressSanitizer algorithm, please, read +// https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm. +// +// When inline assembly is parsed by an instance of X86AsmParser, all +// instructions are emitted via EmitInstruction method. That's the +// place where X86AsmInstrumentation analyzes an instruction and +// decides, whether the instruction should be emitted as is or +// instrumentation is required. The latter case happens when an +// instruction reads from or writes to memory. 
Now instruction opcode
// is explicitly checked, and if an instruction has a memory operand
// (for instance, movq (%rsi, %rcx, 8), %rax) - it should be
// instrumented. There also exist instructions that modify
// memory but don't have explicit memory operands, for instance,
// movs.
//
// Let's first consider 8-byte memory accesses when an instruction
// has an explicit memory operand. In this case we need two registers -
// AddressReg to compute the address of the memory cells which are accessed
// and ShadowReg to compute the corresponding shadow address. So, we need
// to spill both registers before instrumentation code and restore them
// after instrumentation. Thus, in general, instrumentation code will
// look like this:
// PUSHF # Store flags, otherwise they will be overwritten
// PUSH AddressReg # spill AddressReg
// PUSH ShadowReg # spill ShadowReg
// LEA MemOp, AddressReg # compute address of the memory operand
// MOV AddressReg, ShadowReg
// SHR ShadowReg, 3
// # ShadowOffset(AddressReg >> 3) contains address of a shadow
// # corresponding to MemOp.
// CMP ShadowOffset(ShadowReg), 0 # test shadow value
// JZ .Done # when the shadow equals zero, everything is fine
// MOV AddressReg, RDI
// # Call __asan_report function with AddressReg as an argument
// CALL __asan_report
// .Done:
// POP ShadowReg # Restore ShadowReg
// POP AddressReg # Restore AddressReg
// POPF # Restore flags
//
// Memory accesses with different sizes (1-, 2-, 4- and 16-byte) are
// handled in a similar manner, but small memory accesses (less than 8
// bytes) require an additional ScratchReg, which is used for the shadow value.
//
// If, suppose, we're instrumenting an instruction like movs, only the
// contents of RDI, RDI + AccessSize * RCX, RSI, RSI + AccessSize *
// RCX are checked. 
In this case there're no need to spill and restore +// AddressReg , ShadowReg or flags four times, they're saved on stack +// just once, before instrumentation of these four addresses, and restored +// at the end of the instrumentation. +// +// There exist several things which complicate this simple algorithm. +// * Instrumented memory operand can have RSP as a base or an index +// register. So we need to add a constant offset before computation +// of memory address, since flags, AddressReg, ShadowReg, etc. were +// already stored on stack and RSP was modified. +// * Debug info (usually, DWARF) should be adjusted, because sometimes +// RSP is used as a frame register. So, we need to select some +// register as a frame register and temprorary override current CFA +// register. namespace llvm { namespace { @@ -32,10 +102,23 @@ static cl::opt ClAsanInstrumentAssembly( cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden, cl::init(false)); -bool IsStackReg(unsigned Reg) { - return Reg == X86::RSP || Reg == X86::ESP || Reg == X86::SP; +const int64_t MinAllowedDisplacement = std::numeric_limits::min(); +const int64_t MaxAllowedDisplacement = std::numeric_limits::max(); + +int64_t ApplyDisplacementBounds(int64_t Displacement) { + return std::max(std::min(MaxAllowedDisplacement, Displacement), + MinAllowedDisplacement); +} + +void CheckDisplacementBounds(int64_t Displacement) { + assert(Displacement >= MinAllowedDisplacement && + Displacement <= MaxAllowedDisplacement); } +bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; } + +bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; } + std::string FuncName(unsigned AccessSize, bool IsWrite) { return std::string("__asan_report_") + (IsWrite ? 
"store" : "load") + utostr(AccessSize); @@ -43,60 +126,264 @@ std::string FuncName(unsigned AccessSize, bool IsWrite) { class X86AddressSanitizer : public X86AsmInstrumentation { public: - X86AddressSanitizer(const MCSubtargetInfo &STI) : STI(STI) {} + struct RegisterContext { + private: + enum RegOffset { + REG_OFFSET_ADDRESS = 0, + REG_OFFSET_SHADOW, + REG_OFFSET_SCRATCH + }; + + public: + RegisterContext(unsigned AddressReg, unsigned ShadowReg, + unsigned ScratchReg) { + BusyRegs.push_back(convReg(AddressReg, MVT::i64)); + BusyRegs.push_back(convReg(ShadowReg, MVT::i64)); + BusyRegs.push_back(convReg(ScratchReg, MVT::i64)); + } + + unsigned AddressReg(MVT::SimpleValueType VT) const { + return convReg(BusyRegs[REG_OFFSET_ADDRESS], VT); + } + + unsigned ShadowReg(MVT::SimpleValueType VT) const { + return convReg(BusyRegs[REG_OFFSET_SHADOW], VT); + } + + unsigned ScratchReg(MVT::SimpleValueType VT) const { + return convReg(BusyRegs[REG_OFFSET_SCRATCH], VT); + } + + void AddBusyReg(unsigned Reg) { + if (Reg != X86::NoRegister) + BusyRegs.push_back(convReg(Reg, MVT::i64)); + } + + void AddBusyRegs(const X86Operand &Op) { + AddBusyReg(Op.getMemBaseReg()); + AddBusyReg(Op.getMemIndexReg()); + } + + unsigned ChooseFrameReg(MVT::SimpleValueType VT) const { + static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX, + X86::RCX, X86::RDX, X86::RDI, + X86::RSI }; + for (unsigned Reg : Candidates) { + if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg)) + return convReg(Reg, VT); + } + return X86::NoRegister; + } + + private: + unsigned convReg(unsigned Reg, MVT::SimpleValueType VT) const { + return Reg == X86::NoRegister ? 
Reg : getX86SubSuperRegister(Reg, VT); + } + + std::vector BusyRegs; + }; + + X86AddressSanitizer(const MCSubtargetInfo &STI) + : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {} + virtual ~X86AddressSanitizer() {} // X86AsmInstrumentation implementation: - virtual void InstrumentInstruction( - const MCInst &Inst, OperandVector &Operands, MCContext &Ctx, - const MCInstrInfo &MII, MCStreamer &Out) override { + virtual void InstrumentAndEmitInstruction(const MCInst &Inst, + OperandVector &Operands, + MCContext &Ctx, + const MCInstrInfo &MII, + MCStreamer &Out) override { + InstrumentMOVS(Inst, Operands, Ctx, MII, Out); + if (RepPrefix) + EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX)); + InstrumentMOV(Inst, Operands, Ctx, MII, Out); - } - // Should be implemented differently in x86_32 and x86_64 subclasses. - virtual void InstrumentMemOperandSmallImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) = 0; - virtual void InstrumentMemOperandLargeImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) = 0; + RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX); + if (!RepPrefix) + EmitInstruction(Out, Inst); + } - void InstrumentMemOperand(MCParsedAsmOperand &Op, unsigned AccessSize, - bool IsWrite, MCContext &Ctx, MCStreamer &Out); + // Adjusts up stack and saves all registers used in instrumentation. + virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) = 0; + + // Restores all registers used in instrumentation and adjusts stack. 
+ virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) = 0; + + virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize, + bool IsWrite, + const RegisterContext &RegCtx, + MCContext &Ctx, MCStreamer &Out) = 0; + virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize, + bool IsWrite, + const RegisterContext &RegCtx, + MCContext &Ctx, MCStreamer &Out) = 0; + + virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx, + MCStreamer &Out) = 0; + + void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite, + const RegisterContext &RegCtx, MCContext &Ctx, + MCStreamer &Out); + void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg, + unsigned AccessSize, MCContext &Ctx, MCStreamer &Out); + + void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands, + MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out); void InstrumentMOV(const MCInst &Inst, OperandVector &Operands, MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out); - void EmitInstruction(MCStreamer &Out, const MCInst &Inst) { - Out.EmitInstruction(Inst, STI); - } +protected: void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); } -protected: - const MCSubtargetInfo &STI; + void EmitLEA(X86Operand &Op, MVT::SimpleValueType VT, unsigned Reg, + MCStreamer &Out) { + assert(VT == MVT::i32 || VT == MVT::i64); + MCInst Inst; + Inst.setOpcode(VT == MVT::i32 ? X86::LEA32r : X86::LEA64r); + Inst.addOperand(MCOperand::CreateReg(getX86SubSuperRegister(Reg, VT))); + Op.addMemOperands(Inst, 5); + EmitInstruction(Out, Inst); + } + + void ComputeMemOperandAddress(X86Operand &Op, MVT::SimpleValueType VT, + unsigned Reg, MCContext &Ctx, MCStreamer &Out); + + // Creates new memory operand with Displacement added to an original + // displacement. Residue will contain a residue which could happen when the + // total displacement exceeds 32-bit limitation. 
+ std::unique_ptr AddDisplacement(X86Operand &Op, + int64_t Displacement, + MCContext &Ctx, int64_t *Residue); + + bool is64BitMode() const { + return (STI.getFeatureBits() & X86::Mode64Bit) != 0; + } + bool is32BitMode() const { + return (STI.getFeatureBits() & X86::Mode32Bit) != 0; + } + bool is16BitMode() const { + return (STI.getFeatureBits() & X86::Mode16Bit) != 0; + } + + unsigned getPointerWidth() { + if (is16BitMode()) return 16; + if (is32BitMode()) return 32; + if (is64BitMode()) return 64; + llvm_unreachable("invalid mode"); + } + + // True when previous instruction was actually REP prefix. + bool RepPrefix; + + // Offset from the original SP register. + int64_t OrigSPOffset; }; void X86AddressSanitizer::InstrumentMemOperand( - MCParsedAsmOperand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) { + X86Operand &Op, unsigned AccessSize, bool IsWrite, + const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) { assert(Op.isMem() && "Op should be a memory operand."); assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 && "AccessSize should be a power of two, less or equal than 16."); + // FIXME: take into account load/store alignment. + if (IsSmallMemAccess(AccessSize)) + InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out); + else + InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out); +} + +void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, + unsigned CntReg, + unsigned AccessSize, + MCContext &Ctx, MCStreamer &Out) { + // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)] + // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)]. + RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */, + IsSmallMemAccess(AccessSize) + ? 
X86::RBX + : X86::NoRegister /* ScratchReg */); + RegCtx.AddBusyReg(DstReg); + RegCtx.AddBusyReg(SrcReg); + RegCtx.AddBusyReg(CntReg); + + InstrumentMemOperandPrologue(RegCtx, Ctx, Out); + + // Test (%SrcReg) + { + const MCExpr *Disp = MCConstantExpr::Create(0, Ctx); + std::unique_ptr Op(X86Operand::CreateMem( + getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc())); + InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx, + Out); + } - X86Operand &MemOp = static_cast(Op); - // FIXME: get rid of this limitation. - if (IsStackReg(MemOp.getMemBaseReg()) || IsStackReg(MemOp.getMemIndexReg())) + // Test -1(%SrcReg, %CntReg, AccessSize) + { + const MCExpr *Disp = MCConstantExpr::Create(-1, Ctx); + std::unique_ptr Op(X86Operand::CreateMem( + getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(), + SMLoc())); + InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx, + Out); + } + + // Test (%DstReg) + { + const MCExpr *Disp = MCConstantExpr::Create(0, Ctx); + std::unique_ptr Op(X86Operand::CreateMem( + getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc())); + InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out); + } + + // Test -1(%DstReg, %CntReg, AccessSize) + { + const MCExpr *Disp = MCConstantExpr::Create(-1, Ctx); + std::unique_ptr Op(X86Operand::CreateMem( + getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(), + SMLoc())); + InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out); + } + + InstrumentMemOperandEpilogue(RegCtx, Ctx, Out); +} + +void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst, + OperandVector &Operands, + MCContext &Ctx, const MCInstrInfo &MII, + MCStreamer &Out) { + // Access size in bytes. 
+ unsigned AccessSize = 0; + + switch (Inst.getOpcode()) { + case X86::MOVSB: + AccessSize = 1; + break; + case X86::MOVSW: + AccessSize = 2; + break; + case X86::MOVSL: + AccessSize = 4; + break; + case X86::MOVSQ: + AccessSize = 8; + break; + default: return; + } - // FIXME: take into account load/store alignment. - if (AccessSize < 8) - InstrumentMemOperandSmallImpl(MemOp, AccessSize, IsWrite, Ctx, Out); - else - InstrumentMemOperandLargeImpl(MemOp, AccessSize, IsWrite, Ctx, Out); + InstrumentMOVSImpl(AccessSize, Ctx, Out); } -void X86AddressSanitizer::InstrumentMOV( - const MCInst &Inst, OperandVector &Operands, MCContext &Ctx, - const MCInstrInfo &MII, MCStreamer &Out) { +void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst, + OperandVector &Operands, MCContext &Ctx, + const MCInstrInfo &MII, + MCStreamer &Out) { // Access size in bytes. unsigned AccessSize = 0; @@ -132,41 +419,201 @@ void X86AddressSanitizer::InstrumentMOV( } const bool IsWrite = MII.get(Inst.getOpcode()).mayStore(); + for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) { assert(Operands[Ix]); MCParsedAsmOperand &Op = *Operands[Ix]; - if (Op.isMem()) - InstrumentMemOperand(Op, AccessSize, IsWrite, Ctx, Out); + if (Op.isMem()) { + X86Operand &MemOp = static_cast(Op); + RegisterContext RegCtx( + X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */, + IsSmallMemAccess(AccessSize) ? 
X86::RCX + : X86::NoRegister /* ScratchReg */); + RegCtx.AddBusyRegs(MemOp); + InstrumentMemOperandPrologue(RegCtx, Ctx, Out); + InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out); + InstrumentMemOperandEpilogue(RegCtx, Ctx, Out); + } + } +} + +void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op, + MVT::SimpleValueType VT, + unsigned Reg, MCContext &Ctx, + MCStreamer &Out) { + int64_t Displacement = 0; + if (IsStackReg(Op.getMemBaseReg())) + Displacement -= OrigSPOffset; + if (IsStackReg(Op.getMemIndexReg())) + Displacement -= OrigSPOffset * Op.getMemScale(); + + assert(Displacement >= 0); + + // Emit Op as is. + if (Displacement == 0) { + EmitLEA(Op, VT, Reg, Out); + return; + } + + int64_t Residue; + std::unique_ptr NewOp = + AddDisplacement(Op, Displacement, Ctx, &Residue); + EmitLEA(*NewOp, VT, Reg, Out); + + while (Residue != 0) { + const MCConstantExpr *Disp = + MCConstantExpr::Create(ApplyDisplacementBounds(Residue), Ctx); + std::unique_ptr DispOp = + X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(), + SMLoc()); + EmitLEA(*DispOp, VT, Reg, Out); + Residue -= Disp->getValue(); } } +std::unique_ptr +X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement, + MCContext &Ctx, int64_t *Residue) { + assert(Displacement >= 0); + + if (Displacement == 0 || + (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) { + *Residue = Displacement; + return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), + Op.getMemDisp(), Op.getMemBaseReg(), + Op.getMemIndexReg(), Op.getMemScale(), + SMLoc(), SMLoc()); + } + + int64_t OrigDisplacement = + static_cast(Op.getMemDisp())->getValue(); + CheckDisplacementBounds(OrigDisplacement); + Displacement += OrigDisplacement; + + int64_t NewDisplacement = ApplyDisplacementBounds(Displacement); + CheckDisplacementBounds(NewDisplacement); + + *Residue = Displacement - NewDisplacement; + const MCExpr *Disp = MCConstantExpr::Create(NewDisplacement, 
Ctx); + return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp, + Op.getMemBaseReg(), Op.getMemIndexReg(), + Op.getMemScale(), SMLoc(), SMLoc()); +} + class X86AddressSanitizer32 : public X86AddressSanitizer { public: static const long kShadowOffset = 0x20000000; X86AddressSanitizer32(const MCSubtargetInfo &STI) : X86AddressSanitizer(STI) {} + virtual ~X86AddressSanitizer32() {} - virtual void InstrumentMemOperandSmallImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) override; - virtual void InstrumentMemOperandLargeImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) override; + unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) { + unsigned FrameReg = GetFrameRegGeneric(Ctx, Out); + if (FrameReg == X86::NoRegister) + return FrameReg; + return getX86SubSuperRegister(FrameReg, MVT::i32); + } + + void SpillReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg)); + OrigSPOffset -= 4; + } + + void RestoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg)); + OrigSPOffset += 4; + } + + void StoreFlags(MCStreamer &Out) { + EmitInstruction(Out, MCInstBuilder(X86::PUSHF32)); + OrigSPOffset -= 4; + } + + void RestoreFlags(MCStreamer &Out) { + EmitInstruction(Out, MCInstBuilder(X86::POPF32)); + OrigSPOffset += 4; + } + + virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) override { + unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32); + assert(LocalFrameReg != X86::NoRegister); + + const MCRegisterInfo *MRI = Ctx.getRegisterInfo(); + unsigned FrameReg = GetFrameReg(Ctx, Out); + if (MRI && FrameReg != X86::NoRegister) { + SpillReg(Out, LocalFrameReg); + if (FrameReg == X86::ESP) { + Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */); + Out.EmitCFIRelOffset( + MRI->getDwarfRegNum(LocalFrameReg, 
true /* IsEH */), 0); + } + EmitInstruction( + Out, + MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg)); + Out.EmitCFIRememberState(); + Out.EmitCFIDefCfaRegister( + MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */)); + } + + SpillReg(Out, RegCtx.AddressReg(MVT::i32)); + SpillReg(Out, RegCtx.ShadowReg(MVT::i32)); + if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister) + SpillReg(Out, RegCtx.ScratchReg(MVT::i32)); + StoreFlags(Out); + } - private: - void EmitCallAsanReport(MCContext &Ctx, MCStreamer &Out, unsigned AccessSize, - bool IsWrite, unsigned AddressReg) { + virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) override { + unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32); + assert(LocalFrameReg != X86::NoRegister); + + RestoreFlags(Out); + if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister) + RestoreReg(Out, RegCtx.ScratchReg(MVT::i32)); + RestoreReg(Out, RegCtx.ShadowReg(MVT::i32)); + RestoreReg(Out, RegCtx.AddressReg(MVT::i32)); + + unsigned FrameReg = GetFrameReg(Ctx, Out); + if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) { + RestoreReg(Out, LocalFrameReg); + Out.EmitCFIRestoreState(); + if (FrameReg == X86::ESP) + Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */); + } + } + + virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize, + bool IsWrite, + const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) override; + virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize, + bool IsWrite, + const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) override; + virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx, + MCStreamer &Out) override; + +private: + void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx, + MCStreamer &Out, const RegisterContext &RegCtx) { EmitInstruction(Out, MCInstBuilder(X86::CLD)); EmitInstruction(Out, 
MCInstBuilder(X86::MMX_EMMS)); - EmitInstruction(Out, MCInstBuilder(X86::AND64ri8).addReg(X86::ESP) - .addReg(X86::ESP).addImm(-16)); - EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(AddressReg)); + EmitInstruction(Out, MCInstBuilder(X86::AND64ri8) + .addReg(X86::ESP) + .addReg(X86::ESP) + .addImm(-16)); + EmitInstruction( + Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(MVT::i32))); - - const std::string& Fn = FuncName(AccessSize, IsWrite); + const std::string &Fn = FuncName(AccessSize, IsWrite); MCSymbol *FnSym = Ctx.GetOrCreateSymbol(StringRef(Fn)); const MCSymbolRefExpr *FnExpr = MCSymbolRefExpr::Create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx); @@ -174,136 +621,140 @@ public: } }; -void X86AddressSanitizer32::InstrumentMemOperandSmallImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) { - EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EAX)); - EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::ECX)); - EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EDX)); - EmitInstruction(Out, MCInstBuilder(X86::PUSHF32)); +void X86AddressSanitizer32::InstrumentMemOperandSmall( + X86Operand &Op, unsigned AccessSize, bool IsWrite, + const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) { + unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32); + unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32); + unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8); - { - MCInst Inst; - Inst.setOpcode(X86::LEA32r); - Inst.addOperand(MCOperand::CreateReg(X86::EAX)); - Op.addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); - } + assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister); + unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32); - EmitInstruction( - Out, MCInstBuilder(X86::MOV32rr).addReg(X86::ECX).addReg(X86::EAX)); - EmitInstruction(Out, MCInstBuilder(X86::SHR32ri).addReg(X86::ECX) - .addReg(X86::ECX).addImm(3)); + ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out); + 
+ EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg( + AddressRegI32)); + EmitInstruction(Out, MCInstBuilder(X86::SHR32ri) + .addReg(ShadowRegI32) + .addReg(ShadowRegI32) + .addImm(3)); { MCInst Inst; Inst.setOpcode(X86::MOV8rm); - Inst.addOperand(MCOperand::CreateReg(X86::CL)); + Inst.addOperand(MCOperand::CreateReg(ShadowRegI8)); const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx); std::unique_ptr Op( - X86Operand::CreateMem(0, Disp, X86::ECX, 0, 1, SMLoc(), SMLoc())); + X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1, + SMLoc(), SMLoc())); Op->addMemOperands(Inst, 5); EmitInstruction(Out, Inst); } - EmitInstruction(Out, - MCInstBuilder(X86::TEST8rr).addReg(X86::CL).addReg(X86::CL)); + EmitInstruction( + Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8)); MCSymbol *DoneSym = Ctx.CreateTempSymbol(); const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx); - EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr)); + EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr)); - EmitInstruction( - Out, MCInstBuilder(X86::MOV32rr).addReg(X86::EDX).addReg(X86::EAX)); - EmitInstruction(Out, MCInstBuilder(X86::AND32ri).addReg(X86::EDX) - .addReg(X86::EDX).addImm(7)); + EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg( + AddressRegI32)); + EmitInstruction(Out, MCInstBuilder(X86::AND32ri) + .addReg(ScratchRegI32) + .addReg(ScratchRegI32) + .addImm(7)); switch (AccessSize) { + default: llvm_unreachable("Incorrect access size"); case 1: break; case 2: { - MCInst Inst; - Inst.setOpcode(X86::LEA32r); - Inst.addOperand(MCOperand::CreateReg(X86::EDX)); - const MCExpr *Disp = MCConstantExpr::Create(1, Ctx); std::unique_ptr Op( - X86Operand::CreateMem(0, Disp, X86::EDX, 0, 1, SMLoc(), SMLoc())); - Op->addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); + X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1, + SMLoc(), SMLoc())); 
+ EmitLEA(*Op, MVT::i32, ScratchRegI32, Out); break; } case 4: - EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8).addReg(X86::EDX) - .addReg(X86::EDX).addImm(3)); - break; - default: - assert(false && "Incorrect access size"); + EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8) + .addReg(ScratchRegI32) + .addReg(ScratchRegI32) + .addImm(3)); break; } EmitInstruction( - Out, MCInstBuilder(X86::MOVSX32rr8).addReg(X86::ECX).addReg(X86::CL)); - EmitInstruction( - Out, MCInstBuilder(X86::CMP32rr).addReg(X86::EDX).addReg(X86::ECX)); - EmitInstruction(Out, MCInstBuilder(X86::JL_4).addExpr(DoneExpr)); + Out, + MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8)); + EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg( + ShadowRegI32)); + EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr)); - EmitCallAsanReport(Ctx, Out, AccessSize, IsWrite, X86::EAX); + EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx); EmitLabel(Out, DoneSym); - - EmitInstruction(Out, MCInstBuilder(X86::POPF32)); - EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::EDX)); - EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::ECX)); - EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::EAX)); } -void X86AddressSanitizer32::InstrumentMemOperandLargeImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) { - EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::EAX)); - EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(X86::ECX)); - EmitInstruction(Out, MCInstBuilder(X86::PUSHF32)); +void X86AddressSanitizer32::InstrumentMemOperandLarge( + X86Operand &Op, unsigned AccessSize, bool IsWrite, + const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) { + unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32); + unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32); - { - MCInst Inst; - Inst.setOpcode(X86::LEA32r); - Inst.addOperand(MCOperand::CreateReg(X86::EAX)); - 
Op.addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); - } - EmitInstruction( - Out, MCInstBuilder(X86::MOV32rr).addReg(X86::ECX).addReg(X86::EAX)); - EmitInstruction(Out, MCInstBuilder(X86::SHR32ri).addReg(X86::ECX) - .addReg(X86::ECX).addImm(3)); + ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out); + + EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg( + AddressRegI32)); + EmitInstruction(Out, MCInstBuilder(X86::SHR32ri) + .addReg(ShadowRegI32) + .addReg(ShadowRegI32) + .addImm(3)); { MCInst Inst; switch (AccessSize) { - case 8: - Inst.setOpcode(X86::CMP8mi); - break; - case 16: - Inst.setOpcode(X86::CMP16mi); - break; - default: - assert(false && "Incorrect access size"); - break; + default: llvm_unreachable("Incorrect access size"); + case 8: + Inst.setOpcode(X86::CMP8mi); + break; + case 16: + Inst.setOpcode(X86::CMP16mi); + break; } const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx); std::unique_ptr Op( - X86Operand::CreateMem(0, Disp, X86::ECX, 0, 1, SMLoc(), SMLoc())); + X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1, + SMLoc(), SMLoc())); Op->addMemOperands(Inst, 5); Inst.addOperand(MCOperand::CreateImm(0)); EmitInstruction(Out, Inst); } MCSymbol *DoneSym = Ctx.CreateTempSymbol(); const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx); - EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr)); + EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr)); - EmitCallAsanReport(Ctx, Out, AccessSize, IsWrite, X86::EAX); + EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx); EmitLabel(Out, DoneSym); +} - EmitInstruction(Out, MCInstBuilder(X86::POPF32)); - EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::ECX)); - EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(X86::EAX)); +void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize, + MCContext &Ctx, + MCStreamer &Out) { + StoreFlags(Out); + + // No need to test when ECX is equals 
to zero. + MCSymbol *DoneSym = Ctx.CreateTempSymbol(); + const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx); + EmitInstruction( + Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX)); + EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr)); + + // Instrument first and last elements in src and dst range. + InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */, + X86::ECX /* CntReg */, AccessSize, Ctx, Out); + + EmitLabel(Out, DoneSym); + RestoreFlags(Out); } class X86AddressSanitizer64 : public X86AddressSanitizer { @@ -312,37 +763,127 @@ public: X86AddressSanitizer64(const MCSubtargetInfo &STI) : X86AddressSanitizer(STI) {} + virtual ~X86AddressSanitizer64() {} - virtual void InstrumentMemOperandSmallImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) override; - virtual void InstrumentMemOperandLargeImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) override; + unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) { + unsigned FrameReg = GetFrameRegGeneric(Ctx, Out); + if (FrameReg == X86::NoRegister) + return FrameReg; + return getX86SubSuperRegister(FrameReg, MVT::i64); + } + + void SpillReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg)); + OrigSPOffset -= 8; + } + + void RestoreReg(MCStreamer &Out, unsigned Reg) { + EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg)); + OrigSPOffset += 8; + } + + void StoreFlags(MCStreamer &Out) { + EmitInstruction(Out, MCInstBuilder(X86::PUSHF64)); + OrigSPOffset -= 8; + } + + void RestoreFlags(MCStreamer &Out) { + EmitInstruction(Out, MCInstBuilder(X86::POPF64)); + OrigSPOffset += 8; + } + + virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) override { + unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64); + assert(LocalFrameReg != X86::NoRegister); + + const 
MCRegisterInfo *MRI = Ctx.getRegisterInfo(); + unsigned FrameReg = GetFrameReg(Ctx, Out); + if (MRI && FrameReg != X86::NoRegister) { + SpillReg(Out, X86::RBP); + if (FrameReg == X86::RSP) { + Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */); + Out.EmitCFIRelOffset( + MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0); + } + EmitInstruction( + Out, + MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg)); + Out.EmitCFIRememberState(); + Out.EmitCFIDefCfaRegister( + MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */)); + } + + EmitAdjustRSP(Ctx, Out, -128); + SpillReg(Out, RegCtx.ShadowReg(MVT::i64)); + SpillReg(Out, RegCtx.AddressReg(MVT::i64)); + if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister) + SpillReg(Out, RegCtx.ScratchReg(MVT::i64)); + StoreFlags(Out); + } + + virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) override { + unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64); + assert(LocalFrameReg != X86::NoRegister); + + RestoreFlags(Out); + if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister) + RestoreReg(Out, RegCtx.ScratchReg(MVT::i64)); + RestoreReg(Out, RegCtx.AddressReg(MVT::i64)); + RestoreReg(Out, RegCtx.ShadowReg(MVT::i64)); + EmitAdjustRSP(Ctx, Out, 128); + + unsigned FrameReg = GetFrameReg(Ctx, Out); + if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) { + RestoreReg(Out, LocalFrameReg); + Out.EmitCFIRestoreState(); + if (FrameReg == X86::RSP) + Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */); + } + } + + virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize, + bool IsWrite, + const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) override; + virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize, + bool IsWrite, + const RegisterContext &RegCtx, + MCContext &Ctx, + MCStreamer &Out) override; + virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx, + 
MCStreamer &Out) override; private: void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) { - MCInst Inst; - Inst.setOpcode(X86::LEA64r); - Inst.addOperand(MCOperand::CreateReg(X86::RSP)); - const MCExpr *Disp = MCConstantExpr::Create(Offset, Ctx); std::unique_ptr Op( - X86Operand::CreateMem(0, Disp, X86::RSP, 0, 1, SMLoc(), SMLoc())); - Op->addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); + X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1, + SMLoc(), SMLoc())); + EmitLEA(*Op, MVT::i64, X86::RSP, Out); + OrigSPOffset += Offset; } - void EmitCallAsanReport(MCContext &Ctx, MCStreamer &Out, unsigned AccessSize, - bool IsWrite) { + void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx, + MCStreamer &Out, const RegisterContext &RegCtx) { EmitInstruction(Out, MCInstBuilder(X86::CLD)); EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS)); - EmitInstruction(Out, MCInstBuilder(X86::AND64ri8).addReg(X86::RSP) - .addReg(X86::RSP).addImm(-16)); + EmitInstruction(Out, MCInstBuilder(X86::AND64ri8) + .addReg(X86::RSP) + .addReg(X86::RSP) + .addImm(-16)); - const std::string& Fn = FuncName(AccessSize, IsWrite); + if (RegCtx.AddressReg(MVT::i64) != X86::RDI) { + EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg( + RegCtx.AddressReg(MVT::i64))); + } + const std::string &Fn = FuncName(AccessSize, IsWrite); MCSymbol *FnSym = Ctx.GetOrCreateSymbol(StringRef(Fn)); const MCSymbolRefExpr *FnExpr = MCSymbolRefExpr::Create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx); @@ -350,119 +891,111 @@ private: } }; -void X86AddressSanitizer64::InstrumentMemOperandSmallImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) { - EmitAdjustRSP(Ctx, Out, -128); - EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RAX)); - EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RCX)); - EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RDI)); - EmitInstruction(Out, 
MCInstBuilder(X86::PUSHF64)); - { - MCInst Inst; - Inst.setOpcode(X86::LEA64r); - Inst.addOperand(MCOperand::CreateReg(X86::RDI)); - Op.addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); - } - EmitInstruction( - Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RAX).addReg(X86::RDI)); - EmitInstruction(Out, MCInstBuilder(X86::SHR64ri).addReg(X86::RAX) - .addReg(X86::RAX).addImm(3)); +void X86AddressSanitizer64::InstrumentMemOperandSmall( + X86Operand &Op, unsigned AccessSize, bool IsWrite, + const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) { + unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64); + unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32); + unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64); + unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32); + unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8); + + assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister); + unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32); + + ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out); + + EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg( + AddressRegI64)); + EmitInstruction(Out, MCInstBuilder(X86::SHR64ri) + .addReg(ShadowRegI64) + .addReg(ShadowRegI64) + .addImm(3)); { MCInst Inst; Inst.setOpcode(X86::MOV8rm); - Inst.addOperand(MCOperand::CreateReg(X86::AL)); + Inst.addOperand(MCOperand::CreateReg(ShadowRegI8)); const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx); std::unique_ptr Op( - X86Operand::CreateMem(0, Disp, X86::RAX, 0, 1, SMLoc(), SMLoc())); + X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1, + SMLoc(), SMLoc())); Op->addMemOperands(Inst, 5); EmitInstruction(Out, Inst); } - EmitInstruction(Out, - MCInstBuilder(X86::TEST8rr).addReg(X86::AL).addReg(X86::AL)); + EmitInstruction( + Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8)); MCSymbol *DoneSym = Ctx.CreateTempSymbol(); const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx); - 
EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr)); + EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr)); - EmitInstruction( - Out, MCInstBuilder(X86::MOV32rr).addReg(X86::ECX).addReg(X86::EDI)); - EmitInstruction(Out, MCInstBuilder(X86::AND32ri).addReg(X86::ECX) - .addReg(X86::ECX).addImm(7)); + EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg( + AddressRegI32)); + EmitInstruction(Out, MCInstBuilder(X86::AND32ri) + .addReg(ScratchRegI32) + .addReg(ScratchRegI32) + .addImm(7)); switch (AccessSize) { + default: llvm_unreachable("Incorrect access size"); case 1: break; case 2: { - MCInst Inst; - Inst.setOpcode(X86::LEA32r); - Inst.addOperand(MCOperand::CreateReg(X86::ECX)); - const MCExpr *Disp = MCConstantExpr::Create(1, Ctx); std::unique_ptr Op( - X86Operand::CreateMem(0, Disp, X86::ECX, 0, 1, SMLoc(), SMLoc())); - Op->addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); + X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1, + SMLoc(), SMLoc())); + EmitLEA(*Op, MVT::i32, ScratchRegI32, Out); break; } case 4: - EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8).addReg(X86::ECX) - .addReg(X86::ECX).addImm(3)); - break; - default: - assert(false && "Incorrect access size"); + EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8) + .addReg(ScratchRegI32) + .addReg(ScratchRegI32) + .addImm(3)); break; } EmitInstruction( - Out, MCInstBuilder(X86::MOVSX32rr8).addReg(X86::EAX).addReg(X86::AL)); - EmitInstruction( - Out, MCInstBuilder(X86::CMP32rr).addReg(X86::ECX).addReg(X86::EAX)); - EmitInstruction(Out, MCInstBuilder(X86::JL_4).addExpr(DoneExpr)); + Out, + MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8)); + EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg( + ShadowRegI32)); + EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr)); - EmitCallAsanReport(Ctx, Out, AccessSize, IsWrite); + EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, 
RegCtx); EmitLabel(Out, DoneSym); - - EmitInstruction(Out, MCInstBuilder(X86::POPF64)); - EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(X86::RDI)); - EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(X86::RCX)); - EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(X86::RAX)); - EmitAdjustRSP(Ctx, Out, 128); } -void X86AddressSanitizer64::InstrumentMemOperandLargeImpl( - X86Operand &Op, unsigned AccessSize, bool IsWrite, MCContext &Ctx, - MCStreamer &Out) { - EmitAdjustRSP(Ctx, Out, -128); - EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(X86::RAX)); - EmitInstruction(Out, MCInstBuilder(X86::PUSHF64)); +void X86AddressSanitizer64::InstrumentMemOperandLarge( + X86Operand &Op, unsigned AccessSize, bool IsWrite, + const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) { + unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64); + unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64); - { - MCInst Inst; - Inst.setOpcode(X86::LEA64r); - Inst.addOperand(MCOperand::CreateReg(X86::RAX)); - Op.addMemOperands(Inst, 5); - EmitInstruction(Out, Inst); - } - EmitInstruction(Out, MCInstBuilder(X86::SHR64ri).addReg(X86::RAX) - .addReg(X86::RAX).addImm(3)); + ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out); + + EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg( + AddressRegI64)); + EmitInstruction(Out, MCInstBuilder(X86::SHR64ri) + .addReg(ShadowRegI64) + .addReg(ShadowRegI64) + .addImm(3)); { MCInst Inst; switch (AccessSize) { + default: llvm_unreachable("Incorrect access size"); case 8: Inst.setOpcode(X86::CMP8mi); break; case 16: Inst.setOpcode(X86::CMP16mi); break; - default: - assert(false && "Incorrect access size"); - break; } const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx); std::unique_ptr Op( - X86Operand::CreateMem(0, Disp, X86::RAX, 0, 1, SMLoc(), SMLoc())); + X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1, + SMLoc(), SMLoc())); Op->addMemOperands(Inst, 5); 
Inst.addOperand(MCOperand::CreateImm(0)); EmitInstruction(Out, Inst); @@ -470,24 +1003,68 @@ void X86AddressSanitizer64::InstrumentMemOperandLargeImpl( MCSymbol *DoneSym = Ctx.CreateTempSymbol(); const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx); - EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr)); + EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr)); - EmitCallAsanReport(Ctx, Out, AccessSize, IsWrite); + EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx); EmitLabel(Out, DoneSym); +} - EmitInstruction(Out, MCInstBuilder(X86::POPF64)); - EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(X86::RAX)); - EmitAdjustRSP(Ctx, Out, 128); +void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize, + MCContext &Ctx, + MCStreamer &Out) { + StoreFlags(Out); + + // No need to test when RCX is equals to zero. + MCSymbol *DoneSym = Ctx.CreateTempSymbol(); + const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx); + EmitInstruction( + Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX)); + EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr)); + + // Instrument first and last elements in src and dst range. 
+ InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */, + X86::RCX /* CntReg */, AccessSize, Ctx, Out); + + EmitLabel(Out, DoneSym); + RestoreFlags(Out); } } // End anonymous namespace -X86AsmInstrumentation::X86AsmInstrumentation() {} +X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo &STI) + : STI(STI), InitialFrameReg(0) {} + X86AsmInstrumentation::~X86AsmInstrumentation() {} -void X86AsmInstrumentation::InstrumentInstruction( +void X86AsmInstrumentation::InstrumentAndEmitInstruction( const MCInst &Inst, OperandVector &Operands, MCContext &Ctx, - const MCInstrInfo &MII, MCStreamer &Out) {} + const MCInstrInfo &MII, MCStreamer &Out) { + EmitInstruction(Out, Inst); +} + +void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out, + const MCInst &Inst) { + Out.EmitInstruction(Inst, STI); +} + +unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx, + MCStreamer &Out) { + if (!Out.getNumFrameInfos()) // No active dwarf frame + return X86::NoRegister; + const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back(); + if (Frame.End) // Active dwarf frame is closed + return X86::NoRegister; + const MCRegisterInfo *MRI = Ctx.getRegisterInfo(); + if (!MRI) // No register info + return X86::NoRegister; + + if (InitialFrameReg) { + // FrameReg is set explicitly, we're instrumenting a MachineFunction. + return InitialFrameReg; + } + + return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */); +} X86AsmInstrumentation * CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions, @@ -501,7 +1078,7 @@ CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions, if ((STI.getFeatureBits() & X86::Mode64Bit) != 0) return new X86AddressSanitizer64(STI); } - return new X86AsmInstrumentation(); + return new X86AsmInstrumentation(STI); } } // End llvm namespace