//===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
//  http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <string>
#include <system_error>

using namespace llvm;

#define DEBUG_TYPE "asan"
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000;  // < 2G.
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;

static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanReportLoadN = "__asan_report_load_n";
static const char *const kAsanReportStoreN = "__asan_report_store_n";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
    "__asan_unregister_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init_v4";
static const char *const kAsanCovModuleInitName = "__sanitizer_cov_module_init";
static const char *const kAsanCovName = "__sanitizer_cov";
static const char *const kAsanCovIndirCallName = "__sanitizer_cov_indir_call16";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "__asan_gen_";
static const char *const kAsanPoisonStackMemoryName =
    "__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
    "__asan_unpoison_stack_memory";
static const char *const kAsanOptionDetectUAR =
    "__asan_option_detect_stack_use_after_return";

static const int kAsanStackAfterReturnMagic = 0xf5;

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
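// E.g. a 1-byte access uses callback index 0 below, a 4-byte access uses
// index 2, and a 16-byte access uses index 4 (see TypeSizeToSizeIndex).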
// Command-line flags.

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
       cl::desc("instrument read instructions"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
       cl::desc("instrument write instructions"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics",
       cl::desc("instrument atomic instructions (rmw, cmpxchg)"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path",
       cl::desc("use instrumentation with slow path for all accesses"),
       cl::Hidden, cl::init(false));
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
       cl::init(10000),
       cl::desc("maximal number of instructions to instrument in any given BB"),
       cl::Hidden);
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack",
       cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
       cl::desc("Check stack-use-after-return"), cl::Hidden, cl::init(true));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
       cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
static cl::opt<int> ClCoverage("asan-coverage",
       cl::desc("ASan coverage. 0: none, 1: entry block, 2: all blocks, "
                "3: all blocks and critical edges, "
                "4: above plus indirect calls"),
       cl::Hidden, cl::init(0));
static cl::opt<int> ClCoverageBlockThreshold("asan-coverage-block-threshold",
       cl::desc("Add coverage instrumentation only to the entry block if there "
                "are more than this number of blocks."),
       cl::Hidden, cl::init(1500));
static cl::opt<bool> ClInitializers("asan-initialization-order",
       cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
       cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
       cl::Hidden, cl::init(false));
static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
       cl::desc("Realign stack to the value of this flag (power of two)"),
       cl::Hidden, cl::init(32));
static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc("If the function being instrumented contains more than "
             "this number of memory accesses, use callbacks instead of "
             "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));
// This is an experimental feature that will allow clients to choose between
// instrumented and non-instrumented code at link-time.
// If this option is on, just before instrumenting a function we create its
// clone; if the function is not changed by asan the clone is deleted.
// If we end up with a clone, we put the instrumented function into a section
// called "ASAN" and the uninstrumented function into a section called "NOASAN".
//
// This is still a prototype, we need to figure out a way to keep two copies of
// a function so that the linker can easily choose one of them.
static cl::opt<bool> ClKeepUninstrumented("asan-keep-uninstrumented-functions",
       cl::desc("Keep uninstrumented copies of functions"),
       cl::Hidden, cl::init(false));
// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = (Mem >> scale) + offset
static cl::opt<int> ClMappingScale("asan-mapping-scale",
       cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
static cl::opt<bool> ClOpt("asan-opt",
       cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp",
       cl::desc("Instrument the same temp just once"), cl::Hidden,
       cl::init(true));
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
       cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));
static cl::opt<bool> ClCheckLifetime("asan-check-lifetime",
       cl::desc("Use llvm.lifetime intrinsics to insert extra checks"),
       cl::Hidden, cl::init(false));
// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));
static cl::opt<std::string> ClDebugFunc("asan-debug-func",
                                        cl::Hidden, cl::desc("Debug func"));
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalArray,
          "Number of optimized accesses to global arrays");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
namespace {

/// Frontend-provided metadata for source location.
struct LocationMetadata {
  StringRef Filename;
  int LineNo;
  int ColumnNo;

  LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {}

  bool empty() const { return Filename.empty(); }

  void parse(MDNode *MDN) {
    assert(MDN->getNumOperands() == 3);
    MDString *MDFilename = cast<MDString>(MDN->getOperand(0));
    Filename = MDFilename->getString();
    LineNo = cast<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
    ColumnNo = cast<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
  }
};
/// Frontend-provided metadata for global variables.
class GlobalsMetadata {
 public:
  struct Entry {
    Entry()
        : SourceLoc(), Name(), IsDynInit(false),
          IsBlacklisted(false) {}
    LocationMetadata SourceLoc;
    StringRef Name;
    bool IsDynInit;
    bool IsBlacklisted;
  };

  GlobalsMetadata() : inited_(false) {}

  void init(Module& M) {
    assert(!inited_);
    inited_ = true;
    NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
    if (!Globals)
      return;
    for (const Value *MDV : Globals->operands()) {
      const MDNode *MDN = cast<MDNode>(MDV);

      // Metadata node contains the global and the fields of "Entry".
      assert(MDN->getNumOperands() == 5);
      Value *V = MDN->getOperand(0);
      // The optimizer may optimize away a global entirely.
      if (!V)
        continue;
      GlobalVariable *GV = cast<GlobalVariable>(V);
      // We can already have an entry for GV if it was merged with another
      // global.
      Entry &E = Entries[GV];
      if (Value *Loc = MDN->getOperand(1))
        E.SourceLoc.parse(cast<MDNode>(Loc));
      if (Value *Name = MDN->getOperand(2)) {
        MDString *MDName = cast<MDString>(Name);
        E.Name = MDName->getString();
      }
      ConstantInt *IsDynInit = cast<ConstantInt>(MDN->getOperand(3));
      E.IsDynInit |= IsDynInit->isOne();
      ConstantInt *IsBlacklisted = cast<ConstantInt>(MDN->getOperand(4));
      E.IsBlacklisted |= IsBlacklisted->isOne();
    }
  }

  /// Returns metadata entry for a given global.
  Entry get(GlobalVariable *G) const {
    auto Pos = Entries.find(G);
    return (Pos != Entries.end()) ? Pos->second : Entry();
  }

 private:
  bool inited_;
  DenseMap<GlobalVariable*, Entry> Entries;
};
/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
};
static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
  llvm::Triple TargetTriple(M.getTargetTriple());
  bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
  bool IsIOS = TargetTriple.isiOS();
  bool IsFreeBSD = TargetTriple.getOS() == llvm::Triple::FreeBSD;
  bool IsLinux = TargetTriple.getOS() == llvm::Triple::Linux;
  bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
                 TargetTriple.getArch() == llvm::Triple::ppc64le;
  bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
  bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
                  TargetTriple.getArch() == llvm::Triple::mipsel;

  ShadowMapping Mapping;

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = 0;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kIOSShadowOffset32;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else {  // LongSize == 64
    if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset64;
    else if (IsLinux && IsX86_64)
      Mapping.Offset = kSmallX86_64ShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale) {
    Mapping.Scale = ClMappingScale;
  }

  // OR-ing shadow offset is more efficient (at least on x86) if the offset
  // is a power of two, but on ppc64 we have to use add since the shadow
  // offset is not necessarily 1/8-th of the address space.
  Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1));

  return Mapping;
}
static size_t RedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
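// E.g. the default scale of 3 keeps the 32-byte minimum (1 << 3 = 8 is below
// the floor), while -asan-mapping-scale=7 yields a 1 << 7 = 128-byte redzone.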
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
  AddressSanitizer() : FunctionPass(ID) {
    initializeBreakCriticalEdgesPass(*PassRegistry::getPassRegistry());
  }
  const char *getPassName() const override {
    return "AddressSanitizerFunctionPass";
  }
  void instrumentMop(Instruction *I, bool UseCalls);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool runOnFunction(Function &F) override;
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    if (ClCoverage >= 3)
      AU.addRequiredID(BreakCriticalEdgesID);
  }

 private:
  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  void InjectCoverageForIndirectCalls(Function &F,
                                      ArrayRef<Instruction *> IndirCalls);
  bool InjectCoverage(Function &F, ArrayRef<BasicBlock *> AllBlocks,
                      ArrayRef<Instruction *> IndirCalls);
  void InjectCoverageAtBlock(Function &F, BasicBlock &BB);

  LLVMContext *C;
  const DataLayout *DL;
  int LongSize;
  Type *IntptrTy;
  ShadowMapping Mapping;
  Function *AsanCtorFunction;
  Function *AsanInitFunction;
  Function *AsanHandleNoReturnFunc;
  Function *AsanCovFunction;
  Function *AsanCovIndirCallFunction;
  Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
  // This array is indexed by AccessIsWrite and log2(AccessSize).
  Function *AsanErrorCallback[2][kNumberOfAccessSizes];
  Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
  // This array is indexed by AccessIsWrite.
  Function *AsanErrorCallbackSized[2],
           *AsanMemoryAccessCallbackSized[2];
  Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
  InlineAsm *EmptyAsm;
  GlobalsMetadata GlobalsMD;

  friend struct FunctionStackPoisoner;
};
class AddressSanitizerModule : public ModulePass {
 public:
  AddressSanitizerModule() : ModulePass(ID) {}
  bool runOnModule(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid
  const char *getPassName() const override {
    return "AddressSanitizerModule";
  }

 private:
  void initializeCallbacks(Module &M);

  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
  bool ShouldInstrumentGlobal(GlobalVariable *G);
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  size_t MinRedzoneSizeForGlobal() const {
    return RedzoneSizeForScale(Mapping.Scale);
  }

  GlobalsMetadata GlobalsMD;
  Type *IntptrTy;
  LLVMContext *C;
  const DataLayout *DL;
  ShadowMapping Mapping;
  Function *AsanPoisonGlobals;
  Function *AsanUnpoisonGlobals;
  Function *AsanRegisterGlobals;
  Function *AsanUnregisterGlobals;
  Function *AsanCovModuleInit;
};
// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst*, 16> AllocaVec;
  SmallVector<Instruction*, 8> RetVec;
  unsigned StackAlignment;

  Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
           *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;

  // Stores the location and arguments of a poisoning/unpoisoning call for
  // an alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;

  // Maps Value to an AllocaInst from which the Value is originated.
  typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy;
  AllocaForValueMapTy AllocaForValue;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F), ASan(ASan), DIB(*F.getParent()), C(ASan.C),
        IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
        Mapping(ASan.Mapping),
        StackAlignment(1 << Mapping.Scale) {}

  bool runOnFunction() {
    if (!ClStack) return false;
    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    if (AllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    poisonStack();

    if (ClDebugStack) {
      DEBUG(dbgs() << F);
    }
    return true;
  }

  // Finds all static Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void poisonStack();

  // ----------------------- Visitors.
  /// \brief Collect all Ret instructions.
  void visitReturnInst(ReturnInst &RI) {
    RetVec.push_back(&RI);
  }

  /// \brief Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!isInterestingAlloca(AI)) return;

    StackAlignment = std::max(StackAlignment, AI.getAlignment());
    AllocaVec.push_back(&AI);
  }

  /// \brief Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!ClCheckLifetime) return;
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID != Intrinsic::lifetime_start &&
        ID != Intrinsic::lifetime_end)
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
    if (!AI) return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    AllocaPoisonCallVec.push_back(APC);
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(AllocaInst &AI) const {
    return (!AI.isArrayAllocation() && AI.isStaticAlloca() &&
            AI.getAllocatedType()->isSized() &&
            // alloca() may be called with 0 size, ignore it.
            getAllocaSizeInBytes(&AI) > 0);
  }

  uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
    Type *Ty = AI->getAllocatedType();
    uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
    return SizeInBytes;
  }

  /// Finds alloca where the value comes from.
  AllocaInst *findAllocaForValue(Value *V);
  void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
                      Value *ShadowBase, bool DoPoison);
  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  void SetShadowToStackAfterReturnInlined(IRBuilder<> &IRB, Value *ShadowBase,
                                          int Size);
};

}  // namespace
char AddressSanitizer::ID = 0;
INITIALIZE_PASS(AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
    false, false)
FunctionPass *llvm::createAddressSanitizerFunctionPass() {
  return new AddressSanitizer();
}

char AddressSanitizerModule::ID = 0;
INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
    "ModulePass", false, false)
ModulePass *llvm::createAddressSanitizerModulePass() {
  return new AddressSanitizerModule();
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
// \brief Create a constant for Str so that we can pass it to the run-time lib.
static GlobalVariable *createPrivateGlobalForString(
    Module &M, StringRef Str, bool AllowMerging) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  // We use private linkage for module-local strings. If they can be merged
  // with another one, we set the unnamed_addr attribute.
  GlobalVariable *GV =
      new GlobalVariable(M, StrConst->getType(), true,
                         GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
  if (AllowMerging)
    GV->setUnnamedAddr(true);
  GV->setAlignment(1);  // Strings may not be merged w/o setting align 1.
  return GV;
}
/// \brief Create a global describing a source location.
static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
                                                       LocationMetadata MD) {
  Constant *LocData[] = {
      createPrivateGlobalForString(M, MD.Filename, true),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
  };
  auto LocStruct = ConstantStruct::getAnon(LocData);
  auto GV = new GlobalVariable(M, LocStruct->getType(), true,
                               GlobalValue::PrivateLinkage, LocStruct,
                               kAsanGenPrefix);
  GV->setUnnamedAddr(true);
  return GV;
}
static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
  return G->getName().find(kAsanGenPrefix) == 0;
}
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0)
    return Shadow;
  // (Shadow >> scale) | offset
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
  else
    return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
}
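// For the x86_64 small-offset mapping this boils down to two instructions,
// roughly:
//   %0 = lshr i64 %addr, 3
//   %1 = or i64 %0, 2147450880   ; 0x7fff8000
// (an 'add' is emitted instead of the 'or' when OrShadowOffset is false).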
// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall3(
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall3(
        AsanMemset,
        IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
  }
  MI->eraseFromParent();
}
// If I is an interesting memory access, return the PointerOperand
// and set IsWrite/Alignment. Otherwise return NULL.
static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                        unsigned *Alignment) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize"))
    return nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *Alignment = LI->getAlignment();
    return LI->getPointerOperand();
  }
  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *Alignment = SI->getAlignment();
    return SI->getPointerOperand();
  }
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *Alignment = 0;
    return RMW->getPointerOperand();
  }
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *Alignment = 0;
    return XCHG->getPointerOperand();
  }
  return nullptr;
}
static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}
// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    if (!Cmp->isRelational())
      return false;
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (BO->getOpcode() != Instruction::Sub)
      return false;
  } else {
    return false;
  }
  if (!isPointerOperand(I->getOperand(0)) ||
      !isPointerOperand(I->getOperand(1)))
    return false;
  return true;
}
bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
  // If a global variable does not have dynamic initialization we don't
  // have to instrument it. However, if a global does not have an initializer
  // at all, we assume it has a dynamic initializer (in another TU).
  return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}
void
AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
  IRBuilder<> IRB(I);
  Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
  for (int i = 0; i < 2; i++) {
    if (Param[i]->getType()->isPointerTy())
      Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
  }
  IRB.CreateCall2(F, Param[0], Param[1]);
}
void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
  bool IsWrite = false;
  unsigned Alignment = 0;
  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
  assert(Addr);
  if (ClOpt && ClOptGlobals) {
    if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
      // If initialization order checking is disabled, a simple access to a
      // dynamically initialized global is always valid.
      if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
        NumOptimizedAccessesToGlobalVar++;
        return;
      }
    }
    ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr);
    if (CE && CE->isGEPWithNoNotionalOverIndexing()) {
      if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
        if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) {
          NumOptimizedAccessesToGlobalArray++;
          return;
        }
      }
    }
  }

  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();

  assert(OrigTy->isSized());
  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);

  assert((TypeSize % 8) == 0);

  if (IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
  // if the data is properly aligned.
  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
       TypeSize == 128) &&
      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
    return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
  // Instrument unusual size or unusual alignment.
  // We cannot do it with a single check, so we do a 1-byte check for the first
  // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
  // to report the actual access size.
  IRBuilder<> IRB(I);
  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
  } else {
    Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
        OrigPtrTy);
    instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
    instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
  }
}
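// E.g. a 10-byte access is covered by two 1-byte checks: one on the shadow
// of Addr and one on the shadow of Addr + 9, each passing the real size so
// that __asan_report_*_n can describe the whole access.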
// Validate the result of Module::getOrInsertFunction called for an interface
// function of AddressSanitizer. If the instrumented module defines a function
// with the same name, their prototypes must match, otherwise
// getOrInsertFunction returns a bitcast.
static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
  FuncOrBitcast->dump();
  report_fatal_error("trying to redefine an AddressSanitizer "
                     "interface function");
}
Instruction *AddressSanitizer::generateCrashCode(
    Instruction *InsertBefore, Value *Addr,
    bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) {
  IRBuilder<> IRB(InsertBefore);
  CallInst *Call = SizeArgument
      ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
      : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);

  // We don't do Call->setDoesNotReturn() because the BB already has
  // UnreachableInst at the end.
  // This EmptyAsm is required to avoid callback merge.
  IRB.CreateCall(EmptyAsm);
  return Call;
}
Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                           Value *ShadowValue,
                                           uint32_t TypeSize) {
  size_t Granularity = 1 << Mapping.Scale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte = IRB.CreateAnd(
      AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte = IRB.CreateIntCast(
      LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
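// Worked example with the default granularity of 8: for a 2-byte access at
// an address with (Addr & 7) == 1 and shadow value k == 4 (only the first 4
// bytes of the granule are addressable), 1 + 2 - 1 = 2 < 4, so the access is
// fine; the same access at offset 3 gives 3 + 2 - 1 = 4 >= 4 and is reported.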
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         uint32_t TypeSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls) {
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);

  if (UseCalls) {
    IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                   AddrLong);
    return;
  }

  Type *ShadowTy = IntegerType::get(
      *C, std::max(8U, TypeSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *CmpVal = Constant::getNullValue(ShadowTy);
  Value *ShadowValue = IRB.CreateLoad(
      IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));

  Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
  size_t Granularity = 1 << Mapping.Scale;
  TerminatorInst *CrashTerm = nullptr;

  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
    // We use branch weights for the slow path check, to indicate that the slow
    // path is rarely taken. This seems to be the case for SPEC benchmarks.
    TerminatorInst *CheckTerm =
        SplitBlockAndInsertIfThen(Cmp, InsertBefore, false,
            MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    BasicBlock *CrashBlock =
        BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
    CrashTerm = new UnreachableInst(*C, CrashBlock);
    BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
    ReplaceInstWithInst(CheckTerm, NewTerm);
  } else {
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
  }

  Instruction *Crash = generateCrashCode(
      CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument);
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}
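// Schematically, the inlined fast/slow path generated above is:
//   if (*(int8_t *)MemToShadow(Addr) != 0) {      // likely-not-taken branch
//     // slow path, only for accesses smaller than the granularity:
//     if ((int8_t)((Addr & (Granularity - 1)) + AccessSize - 1) >= shadow)
//       __asan_report_{load,store}N(Addr);        // never returns
//   }
// For accesses covering a full granule the slow path is skipped, since any
// non-zero shadow value is already an error.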
void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
                                                  GlobalValue *ModuleName) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt());

  // Add a call to poison all external globals before the given function starts.
  Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);

  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit.getBasicBlockList())
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      CallInst::Create(AsanUnpoisonGlobals, "", RI);
}
void AddressSanitizerModule::createInitializerPoisonCalls(
    Module &M, GlobalValue *ModuleName) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (!GV)
    return;

  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP))
      continue;
    ConstantStruct *CS = cast<ConstantStruct>(OP);

    // Must have a function or null ptr.
    if (Function* F = dyn_cast<Function>(CS->getOperand(1))) {
      if (F->getName() == kAsanModuleCtorName) continue;
      ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
      // Don't instrument CTORs that will run before asan.module_ctor.
      if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
      poisonOneInitializer(*F, ModuleName);
    }
  }
}
bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
  Type *Ty = cast<PointerType>(G->getType())->getElementType();
  DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  if (GlobalsMD.get(G).IsBlacklisted) return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  if (GlobalWasGeneratedByAsan(G)) return false;  // Our own global.
  // Touch only those globals that will not be defined in other modules.
  // Don't handle ODR linkage types and COMDATs since other modules may be
  // built without asan.
  if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
      G->getLinkage() != GlobalVariable::PrivateLinkage &&
      G->getLinkage() != GlobalVariable::InternalLinkage)
    return false;

  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal())
    return false;
  // For now, just ignore this Global if the alignment is large.
  if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;

  if (G->hasSection()) {
    StringRef Section(G->getSection());
    // Ignore the globals from the __OBJC section. The ObjC runtime assumes
    // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
    // them.
    if (Section.startswith("__OBJC,") ||
        Section.startswith("__DATA, __objc_")) {
      DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
      return false;
    }
    // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
    // Constant CFString instances are compiled in the following way:
    //  -- the string buffer is emitted into
    //     __TEXT,__cstring,cstring_literals
    //  -- the constant NSConstantString structure referencing that buffer
    //     is placed into __DATA,__cfstring
    // Therefore there's no point in placing redzones into __DATA,__cfstring.
    // Moreover, it causes the linker to crash on OS X 10.7
    if (Section.startswith("__DATA,__cfstring")) {
      DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
      return false;
    }
    // The linker merges the contents of cstring_literals and removes the
    // trailing zeroes.
    if (Section.startswith("__TEXT,__cstring,cstring_literals")) {
      DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
      return false;
    }
    if (Section.startswith("__TEXT,__objc_methname,cstring_literals")) {
      DEBUG(dbgs() << "Ignoring objc_methname cstring global: " << *G << "\n");
      return false;
    }

    // Callbacks put into the CRT initializer/terminator sections
    // should not be instrumented.
    // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (Section.startswith(".CRT")) {
      DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
      return false;
    }

    // Globals from llvm.metadata aren't emitted, do not instrument them.
    if (Section == "llvm.metadata") return false;
  }

  return true;
}
void AddressSanitizerModule::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Declare our poisoning and unpoisoning functions.
  AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, NULL));
  AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL));
  AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
  // Declare functions that register/unregister globals.
  AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanRegisterGlobalsName, IRB.getVoidTy(),
      IntptrTy, IntptrTy, NULL));
  AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanUnregisterGlobalsName,
      IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
  AsanCovModuleInit = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanCovModuleInitName,
      IRB.getVoidTy(), IntptrTy, NULL));
  AsanCovModuleInit->setLinkage(Function::ExternalLinkage);
}
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
  GlobalsMD.init(M);

  SmallVector<GlobalVariable *, 16> GlobalsToChange;

  for (auto &G : M.globals()) {
    if (ShouldInstrumentGlobal(&G))
      GlobalsToChange.push_back(&G);
  }

  size_t n = GlobalsToChange.size();
  if (n == 0) return false;

  // A global is described by a structure
  //   size_t beg;
  //   size_t size;
  //   size_t size_with_redzone;
  //   const char *name;
  //   const char *module_name;
  //   size_t has_dynamic_init;
  //   void *source_location;
  // We initialize an array of such structures and pass it to a run-time call.
  StructType *GlobalStructTy =
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
                      IntptrTy, IntptrTy, NULL);
  SmallVector<Constant *, 16> Initializers(n);

  bool HasDynamicallyInitializedGlobals = false;

  // We shouldn't merge same module names, as this string serves as unique
  // module ID in runtime.
  GlobalVariable *ModuleName = createPrivateGlobalForString(
      M, M.getModuleIdentifier(), /*AllowMerging*/false);
  for (size_t i = 0; i < n; i++) {
    static const uint64_t kMaxGlobalRedzone = 1 << 18;
    GlobalVariable *G = GlobalsToChange[i];

    auto MD = GlobalsMD.get(G);
    // Create string holding the global name (use global name from metadata
    // if it's available, otherwise just write the name of the global variable).
    GlobalVariable *Name = createPrivateGlobalForString(
        M, MD.Name.empty() ? G->getName() : MD.Name,
        /*AllowMerging*/ true);

    PointerType *PtrTy = cast<PointerType>(G->getType());
    Type *Ty = PtrTy->getElementType();
    uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
    uint64_t MinRZ = MinRedzoneSizeForGlobal();
    // MinRZ <= RZ <= kMaxGlobalRedzone
    // and try to make RZ ~ 1/4 of SizeInBytes.
    uint64_t RZ = std::max(MinRZ,
                           std::min(kMaxGlobalRedzone,
                                    (SizeInBytes / MinRZ / 4) * MinRZ));
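    // E.g. with the default MinRZ of 32: a 4096-byte global gets
    // RZ = (4096 / 32 / 4) * 32 = 1024 bytes of trailing redzone, while a
    // 17-byte global gets the 32-byte minimum plus 15 bytes of padding so
    // that the total size stays a multiple of MinRZ.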
    uint64_t RightRedzoneSize = RZ;
    // Round up to MinRZ
    if (SizeInBytes % MinRZ)
      RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
    assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);

    StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL);
    Constant *NewInitializer = ConstantStruct::get(
        NewTy, G->getInitializer(),
        Constant::getNullValue(RightRedZoneTy), NULL);

    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
      Linkage = GlobalValue::InternalLinkage;
    GlobalVariable *NewGlobal = new GlobalVariable(
        M, NewTy, G->isConstant(), Linkage,
        NewInitializer, "", G, G->getThreadLocalMode());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setAlignment(MinRZ);

    Value *Indices2[2];
    Indices2[0] = IRB.getInt32(0);
    Indices2[1] = IRB.getInt32(0);

    G->replaceAllUsesWith(
        ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true));
    NewGlobal->takeName(G);
    G->eraseFromParent();

    Constant *SourceLoc;
    if (!MD.SourceLoc.empty()) {
      auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
      SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
    } else {
      SourceLoc = ConstantInt::get(IntptrTy, 0);
    }

    Initializers[i] = ConstantStruct::get(
        GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(ModuleName, IntptrTy),
        ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, NULL);

    if (ClInitializers && MD.IsDynInit)
      HasDynamicallyInitializedGlobals = true;

    DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
  }
  ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
  GlobalVariable *AllGlobals = new GlobalVariable(
      M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
      ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (HasDynamicallyInitializedGlobals)
    createInitializerPoisonCalls(M, ModuleName);
  IRB.CreateCall2(AsanRegisterGlobals,
                  IRB.CreatePointerCast(AllGlobals, IntptrTy),
                  ConstantInt::get(IntptrTy, n));

  // We also need to unregister globals at the end, e.g. when a shared library
  // gets closed.
  Function *AsanDtorFunction = Function::Create(
      FunctionType::get(Type::getVoidTy(*C), false),
      GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
  BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
  IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
  IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
                       IRB.CreatePointerCast(AllGlobals, IntptrTy),
                       ConstantInt::get(IntptrTy, n));
  appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);

  DEBUG(dbgs() << M);
  return true;
}
bool AddressSanitizerModule::runOnModule(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    return false;
  DL = &DLP->getDataLayout();
  C = &(M.getContext());
  int LongSize = DL->getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);
  Mapping = getShadowMapping(M, LongSize);
  initializeCallbacks(M);

  bool Changed = false;

  Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
  assert(CtorFunc);
  IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());

  if (ClCoverage > 0) {
    Function *CovFunc = M.getFunction(kAsanCovName);
    int nCov = CovFunc ? CovFunc->getNumUses() : 0;
    IRB.CreateCall(AsanCovModuleInit, ConstantInt::get(IntptrTy, nCov));
    Changed = true;
  }

  if (ClGlobals)
    Changed |= InstrumentGlobals(IRB, M);

  return Changed;
}
void AddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      // IsWrite and TypeSize are encoded in the function name.
      std::string Suffix =
          (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
      AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
          checkInterfaceFunction(
              M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
                                    IRB.getVoidTy(), IntptrTy, NULL));
      AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          checkInterfaceFunction(
              M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
                                    IRB.getVoidTy(), IntptrTy, NULL));
    }
  }
  AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));

  AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
      M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
                            IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
      M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
                            IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));

  AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
  AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, NULL));
  AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, NULL));

  AsanHandleNoReturnFunc = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
  AsanCovFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanCovName, IRB.getVoidTy(), NULL));
  AsanCovIndirCallFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanCovIndirCallName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));

  AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  // We insert an empty inline asm after __asan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}
bool AddressSanitizer::doInitialization(Module &M) {
  // Initialize the private fields. No one has accessed them before.
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  GlobalsMD.init(M);

  C = &(M.getContext());
  LongSize = DL->getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);

  AsanCtorFunction = Function::Create(
      FunctionType::get(Type::getVoidTy(*C), false),
      GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
  BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
  // call __asan_init in the module ctor.
  IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
  AsanInitFunction = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL));
  AsanInitFunction->setLinkage(Function::ExternalLinkage);
  IRB.CreateCall(AsanInitFunction);

  Mapping = getShadowMapping(M, LongSize);

  appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
  return true;
}
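// The ctor built above is, schematically:
//   define internal void @asan.module_ctor() {
//     call void @__asan_init_v4()
//     ret void
//   }
// It is registered with priority 1, so it runs before ordinary static
// constructors (which default to priority 65535).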
bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __asan_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().find(" load]") != std::string::npos) {
    IRBuilder<> IRB(F.begin()->begin());
    IRB.CreateCall(AsanInitFunction);
    return true;
  }
  return false;
}
void AddressSanitizer::InjectCoverageAtBlock(Function &F, BasicBlock &BB) {
  BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
  // Skip static allocas at the top of the entry block so they don't become
  // dynamic when we split the block. If we used our optimized stack layout,
  // then there will only be one alloca and it will come first.
  for (; IP != BE; ++IP) {
    AllocaInst *AI = dyn_cast<AllocaInst>(IP);
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  DebugLoc EntryLoc = &BB == &F.getEntryBlock()
                          ? IP->getDebugLoc().getFnDebugLoc(*C)
                          : IP->getDebugLoc();
  IRBuilder<> IRB(IP);
  IRB.SetCurrentDebugLocation(EntryLoc);
  Type *Int8Ty = IRB.getInt8Ty();
  GlobalVariable *Guard = new GlobalVariable(
      *F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
      Constant::getNullValue(Int8Ty), "__asan_gen_cov_" + F.getName());
  LoadInst *Load = IRB.CreateLoad(Guard);
  Load->setAtomic(Monotonic);
  Load->setAlignment(1);
  Value *Cmp = IRB.CreateICmpEQ(Constant::getNullValue(Int8Ty), Load);
  Instruction *Ins = SplitBlockAndInsertIfThen(
      Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
  IRB.SetInsertPoint(Ins);
  IRB.SetCurrentDebugLocation(EntryLoc);
  // __sanitizer_cov gets the PC of the instruction using GET_CALLER_PC.
  IRB.CreateCall(AsanCovFunction);
  StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
  Store->setAtomic(Monotonic);
  Store->setAlignment(1);
}
// Poor man's coverage that works with ASan.
// We create a Guard boolean variable with the same linkage
// as the function and inject this code into the entry block (-asan-coverage=1)
// or all blocks (-asan-coverage=2):
// if (!*Guard) {
//    __sanitizer_cov();
//    *Guard = 1;
// }
// The accesses to Guard are atomic. The rest of the logic is
// in __sanitizer_cov (it's fine to call it more than once).
//
// This coverage implementation provides very limited data:
// it only tells if a given function (block) was ever executed.
// No counters, no per-edge data.
// But for many use cases this is what we need and the added slowdown
// is negligible. This simple implementation will probably be obsoleted
// by the upcoming Clang-based coverage implementation.
// By having it here and now we hope to
//  a) get the functionality to users earlier and
//  b) collect usage statistics to help improve Clang coverage design.
bool AddressSanitizer::InjectCoverage(Function &F,
                                      ArrayRef<BasicBlock *> AllBlocks,
                                      ArrayRef<Instruction*> IndirCalls) {
  if (!ClCoverage) return false;

  if (ClCoverage == 1 ||
      (unsigned)ClCoverageBlockThreshold < AllBlocks.size()) {
    InjectCoverageAtBlock(F, F.getEntryBlock());
  } else {
    for (auto BB : AllBlocks)
      InjectCoverageAtBlock(F, *BB);
  }
  InjectCoverageForIndirectCalls(F, IndirCalls);
  return true;
}
// On every indirect call we call a run-time function
// __sanitizer_cov_indir_call* with two parameters:
//   - callee address,
//   - global cache array that contains kCacheSize pointers (zero-initialized).
// The cache is used to speed up recording the caller-callee pairs.
// The address of the caller is passed implicitly via caller PC.
// kCacheSize is encoded in the name of the run-time function.
void AddressSanitizer::InjectCoverageForIndirectCalls(
    Function &F, ArrayRef<Instruction *> IndirCalls) {
  if (ClCoverage < 4 || IndirCalls.empty()) return;
  const int kCacheSize = 16;
  const int kCacheAlignment = 64;  // Align for better performance.
  Type *Ty = ArrayType::get(IntptrTy, kCacheSize);
  for (auto I : IndirCalls) {
    IRBuilder<> IRB(I);
    CallSite CS(I);
    Value *Callee = CS.getCalledValue();
    if (isa<InlineAsm>(Callee)) continue;
    GlobalVariable *CalleeCache = new GlobalVariable(
        *F.getParent(), Ty, false, GlobalValue::PrivateLinkage,
        Constant::getNullValue(Ty), "__asan_gen_callee_cache");
    CalleeCache->setAlignment(kCacheAlignment);
    IRB.CreateCall2(AsanCovIndirCallFunction,
                    IRB.CreatePointerCast(Callee, IntptrTy),
                    IRB.CreatePointerCast(CalleeCache, IntptrTy));
  }
}
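// E.g. an indirect call through %fp becomes, schematically:
//   __sanitizer_cov_indir_call16((uintptr_t)%fp, (uintptr_t)&CalleeCache);
// where CalleeCache is the fresh 16-slot, 64-byte-aligned array created
// above for that particular call site.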
1420 bool AddressSanitizer::runOnFunction(Function &F) {
1421 if (&F == AsanCtorFunction) return false;
1422 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
1423 DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
1424 initializeCallbacks(*F.getParent());
1426 // If needed, insert __asan_init before checking for SanitizeAddress attr.
1427 maybeInsertAsanInitAtFunctionEntry(F);
1429 if (!F.hasFnAttribute(Attribute::SanitizeAddress))
1432 if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
1435 // We want to instrument every address only once per basic block (unless there
1436 // are calls between uses).
1437 SmallSet<Value*, 16> TempsToInstrument;
1438 SmallVector<Instruction*, 16> ToInstrument;
1439 SmallVector<Instruction*, 8> NoReturnCalls;
1440 SmallVector<BasicBlock*, 16> AllBlocks;
1441 SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
1442 SmallVector<Instruction*, 8> IndirCalls;
1447 // Fill the set of memory operations to instrument.
1448 for (auto &BB : F) {
1449 AllBlocks.push_back(&BB);
1450 TempsToInstrument.clear();
1451 int NumInsnsPerBB = 0;
1452 for (auto &Inst : BB) {
1453 if (LooksLikeCodeInBug11395(&Inst)) return false;
1455 isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
1456 if (ClOpt && ClOptSameTemp) {
1457 if (!TempsToInstrument.insert(Addr))
1458 continue; // We've seen this temp in the current BB.
1460 } else if (ClInvalidPointerPairs &&
1461 isInterestingPointerComparisonOrSubtraction(&Inst)) {
1462 PointerComparisonsOrSubtracts.push_back(&Inst);
1464 } else if (isa<MemIntrinsic>(Inst)) {
1467 if (isa<AllocaInst>(Inst))
1471 // A call inside BB.
1472 TempsToInstrument.clear();
1473 if (CS.doesNotReturn())
1474 NoReturnCalls.push_back(CS.getInstruction());
1475 if (ClCoverage >= 4 && !CS.getCalledFunction())
1476 IndirCalls.push_back(&Inst);
1480 ToInstrument.push_back(&Inst);
1482 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
1487 Function *UninstrumentedDuplicate = nullptr;
1488 bool LikelyToInstrument =
1489 !NoReturnCalls.empty() || !ToInstrument.empty() || (NumAllocas > 0);
1490 if (ClKeepUninstrumented && LikelyToInstrument) {
1491 ValueToValueMapTy VMap;
1492 UninstrumentedDuplicate = CloneFunction(&F, VMap, false);
1493 UninstrumentedDuplicate->removeFnAttr(Attribute::SanitizeAddress);
1494 UninstrumentedDuplicate->setName("NOASAN_" + F.getName());
1495 F.getParent()->getFunctionList().push_back(UninstrumentedDuplicate);
  bool UseCalls = false;
  if (ClInstrumentationWithCallsThreshold >= 0 &&
      ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
    UseCalls = true;

  // Instrument.
  int NumInstrumented = 0;
  for (auto Inst : ToInstrument) {
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
        instrumentMop(Inst, UseCalls);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
  // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
  for (auto CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc);
  }
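  // AsanHandleNoReturnFunc is the runtime's __asan_handle_no_return; it
  // unpoisons the whole thread stack, so shadow left poisoned by frames that
  // exit abnormally cannot cause false positives afterwards.
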
  for (auto Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    NumInstrumented++;
  }

  bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();

  if (InjectCoverage(F, AllBlocks, IndirCalls))
    res = true;

  DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");

  if (ClKeepUninstrumented) {
    if (!res) {
      // No instrumentation is done, no need for the duplicate.
      if (UninstrumentedDuplicate)
        UninstrumentedDuplicate->eraseFromParent();
    } else {
      // The function was instrumented. We must have the duplicate.
      assert(UninstrumentedDuplicate);
      UninstrumentedDuplicate->setSection("NOASAN");
      assert(!F.hasSection());
      F.setSection("ASAN");
    }
  }

  return res;
}

// Workaround for bug 11395: we don't want to instrument the stack in
// functions with large assembly blobs (32-bit only), otherwise reg alloc may
// crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->getNumArgOperands() <= 5) return false;
  // We have inline assembly with quite a few arguments.
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
    std::string Suffix = itostr(i);
    AsanStackMallocFunc[i] = checkInterfaceFunction(
        M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
                              IntptrTy, IntptrTy, NULL));
    AsanStackFreeFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
        kAsanStackFreeNameTemplate + Suffix, IRB.getVoidTy(), IntptrTy,
        IntptrTy, IntptrTy, NULL));
  }
  AsanPoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
  AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
}
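// As used in poisonStack below: __asan_stack_malloc_<i>(size, real_stack)
// returns a fake frame for the request, or real_stack itself when the fake
// stack is unavailable, and __asan_stack_free_<i>(fake_stack, size,
// real_stack) releases it; <i> is the size class from StackMallocSizeClass.
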
void
FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
                                      IRBuilder<> &IRB, Value *ShadowBase,
                                      bool DoPoison) {
  size_t n = ShadowBytes.size();
  size_t i = 0;
  // We need to (un)poison n bytes of stack shadow. Poison as many as we can
  // using 64-bit stores (if we are on a 64-bit arch), then poison the rest
  // with 32-bit stores, then with 16-bit stores, then with 8-bit stores.
  for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
       LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
    for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
      uint64_t Val = 0;
      for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
        if (ASan.DL->isLittleEndian())
          Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
        else
          Val = (Val << 8) | ShadowBytes[i + j];
      }
      Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
      Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
      Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
      IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
    }
  }
}
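// Example (little-endian, 64-bit target): for the shadow bytes
//   { 0xf1, 0xf1, 0xf1, 0xf1, 0x00, 0x00, 0x04, 0xf3 }
// (left redzone, 16 addressable bytes, a 4-byte partial granule, right
// redzone), the loops above coalesce the poisoning into one 64-bit store of
// 0xf3040000f1f1f1f1 instead of eight single-byte stores.
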
// The fake stack allocator (asan_fake_stack.h) has 11 size classes, one for
// each power of two from kMinStackMallocSize (64 bytes) up to
// kMaxStackMallocSize (64K). Return the smallest class that fits
// LocalStackSize.
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0; ; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize)
      return i;
  llvm_unreachable("impossible LocalStackSize");
}
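// For example, LocalStackSize = 96 maps to class 1 (<= 128 bytes), 4096 maps
// to class 6, and kMaxStackMallocSize (64K) maps to class 10, selecting
// __asan_stack_malloc_10 / __asan_stack_free_10.
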
// Set Size bytes starting from ShadowBase to kAsanStackAfterReturnMagic.
// We cannot use the MemSet intrinsic because it may end up calling the actual
// memset. Size is a multiple of 8.
// Currently this generates 8-byte stores on x86_64; it may be better to
// generate wider stores.
void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
    IRBuilder<> &IRB, Value *ShadowBase, int Size) {
  assert(!(Size % 8));
  assert(kAsanStackAfterReturnMagic == 0xf5);
  for (int i = 0; i < Size; i += 8) {
    Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL),
                    IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
  }
}
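// For example, Size = 32 emits four 8-byte stores of 0xf5f5f5f5f5f5f5f5 at
// ShadowBase + 0, 8, 16 and 24; at the default shadow scale of 3 those 32
// shadow bytes mark a 256-byte frame as gone after return.
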
static DebugLoc getFunctionEntryDebugLocation(Function &F) {
  for (const auto &Inst : F.getEntryBlock())
    if (!isa<AllocaInst>(Inst))
      return Inst.getDebugLoc();
  return DebugLoc();
}

void FunctionStackPoisoner::poisonStack() {
  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);

  assert(AllocaVec.size() > 0);
  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);
  IRB.SetCurrentDebugLocation(EntryDebugLocation);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = { AI->getName().data(),
                                       getAllocaSizeInBytes(AI),
                                       AI->getAlignment(), AI, 0};
    SVD.push_back(D);
  }
  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  size_t MinHeaderSize = ASan.LongSize / 2;
  ASanStackFrameLayout L;
  ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
  DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc =
      ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;

  Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
  AllocaInst *MyAlloca =
      new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
  MyAlloca->setDebugLoc(EntryDebugLocation);
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
  MyAlloca->setAlignment(FrameAlignment);
  assert(MyAlloca->isStaticAlloca());
  Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy);
  Value *LocalStackBase = OrigStackBase;

  if (DoStackMalloc) {
    // LocalStackBase = OrigStackBase
    // if (__asan_option_detect_stack_use_after_return)
    //   LocalStackBase = __asan_stack_malloc_N(LocalStackSize, OrigStackBase);
    StackMallocIdx = StackMallocSizeClass(LocalStackSize);
    assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
    Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal(
        kAsanOptionDetectUAR, IRB.getInt32Ty());
    Value *Cmp = IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
                                  Constant::getNullValue(IRB.getInt32Ty()));
    Instruction *Term = SplitBlockAndInsertIfThen(Cmp, InsBefore, false);
    BasicBlock *CmpBlock = cast<Instruction>(Cmp)->getParent();
    IRBuilder<> IRBIf(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    LocalStackBase = IRBIf.CreateCall2(
        AsanStackMallocFunc[StackMallocIdx],
        ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
    BasicBlock *SetBlock = cast<Instruction>(LocalStackBase)->getParent();
    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    PHINode *Phi = IRB.CreatePHI(IntptrTy, 2);
    Phi->addIncoming(OrigStackBase, CmpBlock);
    Phi->addIncoming(LocalStackBase, SetBlock);
    LocalStackBase = Phi;
  }
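  // The IR built above looks roughly like this (N = StackMallocIdx):
  //   entry:
  //     %flag = load i32* @__asan_option_detect_stack_use_after_return
  //     %cmp  = icmp ne i32 %flag, 0
  //     br i1 %cmp, label %fake, label %cont
  //   fake:
  //     %fs = call i64 @__asan_stack_malloc_N(i64 LocalStackSize, i64 %orig)
  //     br label %cont
  //   cont:
  //     %base = phi i64 [ %orig, %entry ], [ %fs, %fake ]
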
  // Insert poison calls for lifetime intrinsics for allocas.
  bool HavePoisonedAllocas = false;
  for (const auto &APC : AllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    HavePoisonedAllocas |= APC.DoPoison;
  }

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
                                   /*AllowMerging*/ true);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
                                             IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);

  // Poison the stack redzones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);
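  // After the three stores above the left redzone header holds:
  //   word 0: kCurrentStackFrameMagic (rewritten to kRetiredStackFrameMagic
  //           before each return, below)
  //   word 1: pointer to L.DescriptionString, which encodes the offset, size
  //           and name of every variable in the frame
  //   word 2: the function address, serving as a PC for reports
  // The runtime parses this header when reporting stack-memory errors.
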
  // (Un)poison the stack before all ret instructions.
  for (auto Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if LocalStackBase != OrigStackBase:
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(LocalStackBase) = 0
      //     else
      //         __asan_stack_free_N(LocalStackBase, OrigStackBase)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp = IRBRet.CreateICmpNE(LocalStackBase, OrigStackBase);
      TerminatorInst *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
                                           ClassSize >> Mapping.Scale);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            LocalStackBase,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall3(AsanStackFreeFunc[StackMallocIdx], LocalStackBase,
                              ConstantInt::get(IntptrTy, LocalStackSize),
                              OrigStackBase);
      }

      IRBuilder<> IRBElse(ElseTerm);
      poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
    } else if (HavePoisonedAllocas) {
      // If we poisoned some allocas in llvm.lifetime analysis,
      // unpoison the whole stack frame now.
      assert(LocalStackBase == OrigStackBase);
      poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
    } else {
      poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec)
    AI->eraseFromParent();
}
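// A note on the inlined path above: the last word of a small fake frame holds
// a pointer, saved there by __asan_stack_malloc_N, to the frame's allocation
// flag; storing 0 through SavedFlagPtr releases the frame without a runtime
// call, which is why only size classes above 4 call __asan_stack_free_*.
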
void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert the call to the ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
                           : AsanUnpoisonStackMemoryFunc,
                  AddrArg, SizeArg);
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by a previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
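//
// A sketch of the resulting instrumentation for one alloca:
//   %buf = alloca [256 x i8]
//   %p = bitcast [256 x i8]* %buf to i8*
//   call void @llvm.lifetime.start(i64 256, i8* %p)
//     ; instrumented with: call @__asan_unpoison_stack_memory(%buf, 256)
//   ...
//   call void @llvm.lifetime.end(i64 256, i8* %p)
//     ; instrumented with: call @__asan_poison_stack_memory(%buf, 256)
// findAllocaForValue (below) maps %p back to %buf through casts and PHIs.
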
AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
    // We're interested only in allocas we can handle.
    return isInterestingAlloca(*AI) ? AI : nullptr;
  // See if we've already calculated (or started to calculate) alloca for a
  // given value.
  AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
  if (I != AllocaForValue.end())
    return I->second;
  // Store 0 while we're calculating alloca for value V to avoid
  // infinite recursion if the value references itself.
  AllocaForValue[V] = nullptr;
  AllocaInst *Res = nullptr;
  if (CastInst *CI = dyn_cast<CastInst>(V))
    Res = findAllocaForValue(CI->getOperand(0));
  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *IncValue = PN->getIncomingValue(i);
      // Allow self-referencing phi-nodes.
      if (IncValue == PN) continue;
      AllocaInst *IncValueAI = findAllocaForValue(IncValue);
      // AI for incoming values should exist and should all be equal.
      if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
        return nullptr;
      Res = IncValueAI;
    }
  }
  if (Res)
    AllocaForValue[V] = Res;