//===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
//                     The LLVM Compiler Infrastructure
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
//  http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <system_error>
#define DEBUG_TYPE "asan"
static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000;  // < 2G.
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
    "__asan_unregister_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init_v5";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
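// The size classes are assumed to double from kMinStackMallocSize (class 0,
// 64 bytes) up to kMaxAsanStackMallocSizeClass (class 10, 64K ==
// kMaxStackMallocSize); e.g. a 500-byte frame would presumably be served by
// __asan_stack_malloc_3 and released by __asan_stack_free_3.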
static const char *const kAsanGenPrefix = "__asan_gen_";
static const char *const kSanCovGenPrefix = "__sancov_gen_";
static const char *const kAsanPoisonStackMemoryName =
    "__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
    "__asan_unpoison_stack_memory";
static const char *const kAsanOptionDetectUAR =
    "__asan_option_detect_stack_use_after_return";
static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";
// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
static const unsigned kAllocaRzSize = 32;
// Command-line flags.
// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
                                      cl::desc("Check stack-use-after-return"),
                                      cl::Hidden, cl::init(true));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));
static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
        "If the function being instrumented contains more than "
        "this number of memory accesses, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));
static cl::opt<bool> ClInstrumentAllocas("asan-instrument-allocas",
                                         cl::desc("instrument dynamic allocas"),
                                         cl::Hidden, cl::init(false));
static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = (Mem >> scale) + (1 << offset_log)
static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));
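// A worked example of the default 64-bit mapping (illustrative only; the
// actual Scale and Offset are selected in getShadowMapping() below): with
// Scale = 3 and Offset = kDefaultShadowOffset64 (1ULL << 44), an access to
// address A is checked against the shadow byte at (A >> 3) + 0x100000000000,
// so every shadow byte describes the state of 8 application bytes.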
// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));
static cl::opt<bool> ClCheckLifetime(
    "asan-check-lifetime",
    cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), cl::Hidden,
static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));
static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");
/// Frontend-provided metadata for source location.
struct LocationMetadata {
  LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {}
  bool empty() const { return Filename.empty(); }
  void parse(MDNode *MDN) {
    assert(MDN->getNumOperands() == 3);
    MDString *DIFilename = cast<MDString>(MDN->getOperand(0));
    Filename = DIFilename->getString();
        mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
        mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
/// Frontend-provided metadata for global variables.
class GlobalsMetadata {
    Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {}
    LocationMetadata SourceLoc;
  GlobalsMetadata() : inited_(false) {}
  void init(Module &M) {
    NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
    if (!Globals) return;
    for (auto MDN : Globals->operands()) {
      // Metadata node contains the global and the fields of "Entry".
      assert(MDN->getNumOperands() == 5);
      auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0));
      // The optimizer may optimize away a global entirely.
      // We can already have an entry for GV if it was merged with another
      Entry &E = Entries[GV];
      if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
        E.SourceLoc.parse(Loc);
      if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
        E.Name = Name->getString();
      ConstantInt *IsDynInit =
          mdconst::extract<ConstantInt>(MDN->getOperand(3));
      E.IsDynInit |= IsDynInit->isOne();
      ConstantInt *IsBlacklisted =
          mdconst::extract<ConstantInt>(MDN->getOperand(4));
      E.IsBlacklisted |= IsBlacklisted->isOne();
  /// Returns metadata entry for a given global.
  Entry get(GlobalVariable *G) const {
    auto Pos = Entries.find(G);
    return (Pos != Entries.end()) ? Pos->second : Entry();
  DenseMap<GlobalVariable *, Entry> Entries;
/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
struct ShadowMapping {
static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
  bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
  bool IsIOS = TargetTriple.isiOS();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
                 TargetTriple.getArch() == llvm::Triple::ppc64le;
  bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
  bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
                  TargetTriple.getArch() == llvm::Triple::mipsel;
  bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
                  TargetTriple.getArch() == llvm::Triple::mips64el;
  bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64;
  bool IsWindows = TargetTriple.isOSWindows();
  ShadowMapping Mapping;
  if (LongSize == 32) {
      Mapping.Offset = kMIPS32_ShadowOffset32;
      Mapping.Offset = kFreeBSD_ShadowOffset32;
      Mapping.Offset = kIOSShadowOffset32;
      Mapping.Offset = kWindowsShadowOffset32;
      Mapping.Offset = kDefaultShadowOffset32;
  } else {  // LongSize == 64
      Mapping.Offset = kPPC64_ShadowOffset64;
      Mapping.Offset = kFreeBSD_ShadowOffset64;
    else if (IsLinux && IsX86_64)
      Mapping.Offset = kSmallX86_64ShadowOffset;
      Mapping.Offset = kMIPS64_ShadowOffset64;
      Mapping.Offset = kAArch64_ShadowOffset64;
      Mapping.Offset = kDefaultShadowOffset64;
  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale) {
    Mapping.Scale = ClMappingScale;
  // OR-ing the shadow offset is more efficient (at least on x86) if the offset
  // is a power of two, but on ppc64 we have to use add since the shadow
  // offset is not necessarily 1/8-th of the address space.
  Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1));
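  // Illustrative example: the default 64-bit Offset (1ULL << 44) is a power of
  // two, so the cheaper OR form is chosen; kSmallX86_64ShadowOffset
  // (0x7FFF8000) is not a power of two, and ppc64 is excluded explicitly, so
  // those targets fall back to the ADD form.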
static size_t RedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
  AddressSanitizer() : FunctionPass(ID) {
    initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
  const char *getPassName() const override {
    return "AddressSanitizerFunctionPass";
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
    Type *Ty = AI->getAllocatedType();
    uint64_t SizeInBytes =
        AI->getModule()->getDataLayout().getTypeAllocSize(Ty);
  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(AllocaInst &AI);
  // Check if we have dynamic alloca.
  bool isDynamicAlloca(AllocaInst &AI) const {
    return AI.isArrayAllocation() || !AI.isStaticAlloca();
  /// If it is an interesting memory access, return the PointerOperand
  /// and set IsWrite/Alignment. Otherwise return nullptr.
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   unsigned *Alignment);
  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
                     bool UseCalls, const DataLayout &DL);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp);
  void instrumentUnusualSizeOrAlignment(Instruction *I, Value *Addr,
                                        uint32_t TypeSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool runOnFunction(Function &F) override;
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid
  DominatorTree &getDominatorTree() const { return *DT; }
  void initializeCallbacks(Module &M);
  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    uint64_t TypeSize) const;
  ShadowMapping Mapping;
  Function *AsanCtorFunction;
  Function *AsanInitFunction;
  Function *AsanHandleNoReturnFunc;
  Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
  // This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
  Function *AsanErrorCallback[2][2][kNumberOfAccessSizes];
  Function *AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];
  // This array is indexed by AccessIsWrite and Experiment.
  Function *AsanErrorCallbackSized[2][2];
  Function *AsanMemoryAccessCallbackSized[2][2];
  Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
  GlobalsMetadata GlobalsMD;
  DenseMap<AllocaInst *, bool> ProcessedAllocas;
  friend struct FunctionStackPoisoner;
class AddressSanitizerModule : public ModulePass {
  AddressSanitizerModule() : ModulePass(ID) {}
  bool runOnModule(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid
  const char *getPassName() const override { return "AddressSanitizerModule"; }
  void initializeCallbacks(Module &M);
  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
  bool ShouldInstrumentGlobal(GlobalVariable *G);
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  size_t MinRedzoneSizeForGlobal() const {
    return RedzoneSizeForScale(Mapping.Scale);
  GlobalsMetadata GlobalsMD;
  ShadowMapping Mapping;
  Function *AsanPoisonGlobals;
  Function *AsanUnpoisonGlobals;
  Function *AsanRegisterGlobals;
  Function *AsanUnregisterGlobals;
// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception, most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existent bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  AddressSanitizer &ASan;
  ShadowMapping Mapping;
  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<Instruction *, 8> RetVec;
  unsigned StackAlignment;
  Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;
  Function *AsanAllocaPoisonFunc, *AsanAllocasUnpoisonFunc;
  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
  SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;
  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  // Maps a Value to the AllocaInst from which the Value originated.
  typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
  AllocaForValueMapTy AllocaForValue;
  bool HasNonEmptyInlineAsm;
  std::unique_ptr<CallInst> EmptyInlineAsm;
  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
        DIB(*F.getParent(), /*AllowUnresolved*/ false),
        IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy, 0)),
        Mapping(ASan.Mapping),
        StackAlignment(1 << Mapping.Scale),
        HasNonEmptyInlineAsm(false),
        EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
  bool runOnFunction() {
    if (!ClStack) return false;
    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);
    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
    initializeCallbacks(*F.getParent());
  // Finds all Alloca instructions, puts poisoned red zones around all of them,
  // and then unpoisons everything before the function returns.
  void createDynamicAllocasInitStorage();
  // ----------------------- Visitors.
  /// \brief Collect all Ret instructions.
  void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }
  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
    IRBuilder<> IRB(InstBefore);
    IRB.CreateCall(AsanAllocasUnpoisonFunc,
                   {IRB.CreateLoad(DynamicAllocaLayout),
                    IRB.CreatePtrToInt(SavedStack, IntptrTy)});
  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (auto &Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
    for (auto &StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // replace the call with a new one with changed parameters and replace all
  // of its uses with the new address, so that
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (the first 32 bytes are for the left redzone).
  // The additional size is added so that the new allocation contains not only
  // the requested memory, but also the left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);
  /// \brief Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!ASan.isInterestingAlloca(AI)) return;
    StackAlignment = std::max(StackAlignment, AI.getAlignment());
    if (ASan.isDynamicAlloca(AI))
      DynamicAllocaVec.push_back(&AI);
      AllocaVec.push_back(&AI);
  /// \brief Collect lifetime intrinsic calls to check for use-after-scope
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (!ClCheckLifetime) return;
    if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end)
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    AllocaPoisonCallVec.push_back(APC);
  void visitCallInst(CallInst &CI) {
    HasNonEmptyInlineAsm |=
        CI.isInlineAsm() && !CI.isIdenticalTo(EmptyInlineAsm.get());
  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);
  bool doesDominateAllExits(const Instruction *I) const {
    for (auto Ret : RetVec) {
      if (!ASan.getDominatorTree().dominates(I, Ret)) return false;
  /// Finds alloca where the value comes from.
  AllocaInst *findAllocaForValue(Value *V);
  void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
                      Value *ShadowBase, bool DoPoison);
  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
  void SetShadowToStackAfterReturnInlined(IRBuilder<> &IRB, Value *ShadowBase,
  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
char AddressSanitizer::ID = 0;
INITIALIZE_PASS_BEGIN(
    AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
    AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
FunctionPass *llvm::createAddressSanitizerFunctionPass() {
  return new AddressSanitizer();
char AddressSanitizerModule::ID = 0;
    AddressSanitizerModule, "asan-module",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
ModulePass *llvm::createAddressSanitizerModulePass() {
  return new AddressSanitizerModule();
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
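  // E.g. a 32-bit (4-byte) access yields index 2, which selects the size-4
  // callbacks such as __asan_report_load4 / __asan_report_store4.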
// \brief Create a constant for Str so that we can pass it to the run-time lib.
static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  // We use private linkage for module-local strings. If they can be merged
  // with another one, we set the unnamed_addr attribute.
      new GlobalVariable(M, StrConst->getType(), true,
                         GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
  if (AllowMerging) GV->setUnnamedAddr(true);
  GV->setAlignment(1);  // Strings may not be merged w/o setting align 1.
/// \brief Create a global describing a source location.
static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
                                                       LocationMetadata MD) {
  Constant *LocData[] = {
      createPrivateGlobalForString(M, MD.Filename, true),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
  auto LocStruct = ConstantStruct::getAnon(LocData);
  auto GV = new GlobalVariable(M, LocStruct->getType(), true,
                               GlobalValue::PrivateLinkage, LocStruct,
  GV->setUnnamedAddr(true);
static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
  return G->getName().find(kAsanGenPrefix) == 0 ||
         G->getName().find(kSanCovGenPrefix) == 0;
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
  return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  if (isa<MemTransferInst>(MI)) {
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  MI->eraseFromParent();
/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(AllocaInst &AI) {
  auto PreviouslySeenAllocaInfo = ProcessedAllocas.find(&AI);
  if (PreviouslySeenAllocaInfo != ProcessedAllocas.end())
    return PreviouslySeenAllocaInfo->getSecond();
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       getAllocaSizeInBytes(&AI) > 0 &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI) ||
        isDynamicAlloca(AI)));
  ProcessedAllocas[&AI] = IsInteresting;
  return IsInteresting;
/// If I is an interesting memory access, return the PointerOperand
/// and set IsWrite/Alignment. Otherwise return nullptr.
Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                   unsigned *Alignment) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;
  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    PtrOperand = XCHG->getPointerOperand();
  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (ClSkipPromotableAllocas)
    if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
      return isInterestingAlloca(*AI) ? AI : nullptr;
static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    if (!Cmp->isRelational()) return false;
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (BO->getOpcode() != Instruction::Sub) return false;
  if (!isPointerOperand(I->getOperand(0)) ||
      !isPointerOperand(I->getOperand(1)))
bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
  // If a global variable does not have dynamic initialization we don't
  // have to instrument it. However, if a global does not have an initializer
  // at all, we assume it has a dynamic initializer (in another TU).
  return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
  Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
  for (int i = 0; i < 2; i++) {
    if (Param[i]->getType()->isPointerTy())
      Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
  IRB.CreateCall(F, Param);
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     Instruction *I, bool UseCalls,
                                     const DataLayout &DL) {
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment);
  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, you set Exp to a non-zero value (mask of optimization
  // experiments that want to remove instrumentation of this instruction).
  // If Exp is non-zero, this pass will emit special calls into the runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
  // make the runtime terminate the program in a special way (with a different
  // exit status). Then you run the new compiler on a buggy corpus, collect
  // the special terminations (ideally, you don't see them at all -- no false
  // negatives) and make the decision on the optimization.
  uint32_t Exp = ClForceExperiment;
  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
    if (G != NULL && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToGlobalVar++;
  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToStackVar++;
    NumInstrumentedWrites++;
    NumInstrumentedReads++;
  unsigned Granularity = 1 << Mapping.Scale;
  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
  // if the data is properly aligned.
  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
    return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls,
  instrumentUnusualSizeOrAlignment(I, Addr, TypeSize, IsWrite, nullptr,
Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
                                                 Value *Addr, bool IsWrite,
                                                 size_t AccessSizeIndex,
  IRBuilder<> IRB(InsertBefore);
  Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
  CallInst *Call = nullptr;
      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
                            {Addr, SizeArgument});
      Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
                            {Addr, SizeArgument, ExpVal});
          IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
      Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
  // We don't do Call->setDoesNotReturn() because the BB already has
  // UnreachableInst at the end.
  // This EmptyAsm is required to avoid callback merge.
  IRB.CreateCall(EmptyAsm, {});
Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
  size_t Granularity = 1 << Mapping.Scale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte =
      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         uint32_t TypeSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls,
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
    IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
      IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
                     {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
      IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *CmpVal = Constant::getNullValue(ShadowTy);
  Value *ShadowValue =
      IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
  Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
  size_t Granularity = 1 << Mapping.Scale;
  TerminatorInst *CrashTerm = nullptr;
  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
    // We use branch weights for the slow path check, to indicate that the slow
    // path is rarely taken. This seems to be the case for SPEC benchmarks.
    TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
        Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    BasicBlock *CrashBlock =
        BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
    CrashTerm = new UnreachableInst(*C, CrashBlock);
    BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
    ReplaceInstWithInst(CheckTerm, NewTerm);
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
                                         AccessSizeIndex, SizeArgument, Exp);
  Crash->setDebugLoc(OrigIns->getDebugLoc());
// Instrument unusual size or unusual alignment.
// We cannot do this with a single check, so we do a 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
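// For example, a 10-byte access at Addr becomes two 1-byte checks, one at Addr
// and one at Addr + 9, and a failure is reported via the "_n" callback with
// the real size (10) passed as the second argument.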
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
    Instruction *I, Value *Addr, uint32_t TypeSize, bool IsWrite,
    Value *SizeArgument, bool UseCalls, uint32_t Exp) {
  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
    IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
    IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
                   {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
    Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
    instrumentAddress(I, I, Addr, 8, IsWrite, Size, false, Exp);
    instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false, Exp);
void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
                                                  GlobalValue *ModuleName) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt());
  // Add a call to poison all external globals before the given function starts.
  Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit.getBasicBlockList())
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      CallInst::Create(AsanUnpoisonGlobals, "", RI);
void AddressSanitizerModule::createInitializerPoisonCalls(
    Module &M, GlobalValue *ModuleName) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP)) continue;
    ConstantStruct *CS = cast<ConstantStruct>(OP);
    // Must have a function or null ptr.
    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
      if (F->getName() == kAsanModuleCtorName) continue;
      ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
      // Don't instrument CTORs that will run before asan.module_ctor.
      if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
      poisonOneInitializer(*F, ModuleName);
bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
  Type *Ty = cast<PointerType>(G->getType())->getElementType();
  DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
  if (GlobalsMD.get(G).IsBlacklisted) return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  if (GlobalWasGeneratedByAsan(G)) return false;  // Our own global.
  // Touch only those globals that will not be defined in other modules.
  // Don't handle ODR linkage types and COMDATs since other modules may be built
  if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
      G->getLinkage() != GlobalVariable::PrivateLinkage &&
      G->getLinkage() != GlobalVariable::InternalLinkage)
  if (G->hasComdat()) return false;
  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal()) return false;
  // For now, just ignore this Global if the alignment is large.
  if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
  if (G->hasSection()) {
    StringRef Section(G->getSection());
    if (TargetTriple.isOSBinFormatMachO()) {
      StringRef ParsedSegment, ParsedSection;
      unsigned TAA = 0, StubSize = 0;
      std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
      if (!ErrorCode.empty()) {
        report_fatal_error("Invalid section specifier '" + ParsedSection +
                           "': " + ErrorCode + ".");
      // Ignore the globals from the __OBJC section. The ObjC runtime assumes
      // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
      if (ParsedSegment == "__OBJC" ||
          (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
        DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
      // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
      // Constant CFString instances are compiled in the following way:
      //  -- the string buffer is emitted into
      //     __TEXT,__cstring,cstring_literals
      //  -- the constant NSConstantString structure referencing that buffer
      //     is placed into __DATA,__cfstring
      // Therefore there's no point in placing redzones into __DATA,__cfstring.
      // Moreover, it causes the linker to crash on OS X 10.7
      if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
        DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
      // The linker merges the contents of cstring_literals and removes the
      if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
        DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
    // Callbacks put into the CRT initializer/terminator sections
    // should not be instrumented.
    // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (Section.startswith(".CRT")) {
      DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
  // Globals from llvm.metadata aren't emitted, do not instrument them.
  if (Section == "llvm.metadata") return false;
void AddressSanitizerModule::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Declare our poisoning and unpoisoning functions.
  AsanPoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr));
  AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnpoisonGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr));
  AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
  // Declare functions that register/unregister globals.
  AsanRegisterGlobals = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnregisterGlobals = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction(kAsanUnregisterGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, nullptr));
  AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
  SmallVector<GlobalVariable *, 16> GlobalsToChange;
  for (auto &G : M.globals()) {
    if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G);
  size_t n = GlobalsToChange.size();
  if (n == 0) return false;
  // A global is described by a structure
  //   size_t size_with_redzone;
  //   const char *name;
  //   const char *module_name;
  //   size_t has_dynamic_init;
  //   void *source_location;
  // We initialize an array of such structures and pass it to a run-time call.
  StructType *GlobalStructTy =
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
                      IntptrTy, IntptrTy, nullptr);
  SmallVector<Constant *, 16> Initializers(n);
  bool HasDynamicallyInitializedGlobals = false;
  // We shouldn't merge same module names, as this string serves as a unique
  // module ID at runtime.
  GlobalVariable *ModuleName = createPrivateGlobalForString(
      M, M.getModuleIdentifier(), /*AllowMerging*/ false);
  auto &DL = M.getDataLayout();
  for (size_t i = 0; i < n; i++) {
    static const uint64_t kMaxGlobalRedzone = 1 << 18;
    GlobalVariable *G = GlobalsToChange[i];
    auto MD = GlobalsMD.get(G);
    // Create string holding the global name (use global name from metadata
    // if it's available, otherwise just write the name of global variable).
    GlobalVariable *Name = createPrivateGlobalForString(
        M, MD.Name.empty() ? G->getName() : MD.Name,
        /*AllowMerging*/ true);
    PointerType *PtrTy = cast<PointerType>(G->getType());
    Type *Ty = PtrTy->getElementType();
    uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
    uint64_t MinRZ = MinRedzoneSizeForGlobal();
    // MinRZ <= RZ <= kMaxGlobalRedzone,
    // and we try to make RZ ~ 1/4 of SizeInBytes.
    uint64_t RZ = std::max(
        MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
    uint64_t RightRedzoneSize = RZ;
    // Round up to MinRZ
    if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
    assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
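    // Worked example: with MinRZ = 32 and SizeInBytes = 100, RZ = max(32, 0)
    // = 32, rounding up adds 32 - (100 % 32) = 28, so RightRedzoneSize = 60
    // and the padded size 160 is a multiple of MinRZ.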
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
    StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr);
    Constant *NewInitializer =
        ConstantStruct::get(NewTy, G->getInitializer(),
                            Constant::getNullValue(RightRedZoneTy), nullptr);
    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
      Linkage = GlobalValue::InternalLinkage;
    GlobalVariable *NewGlobal =
        new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer,
                           "", G, G->getThreadLocalMode());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setAlignment(MinRZ);
    Indices2[0] = IRB.getInt32(0);
    Indices2[1] = IRB.getInt32(0);
    G->replaceAllUsesWith(
        ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
    NewGlobal->takeName(G);
    G->eraseFromParent();
    Constant *SourceLoc;
    if (!MD.SourceLoc.empty()) {
      auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
      SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
      SourceLoc = ConstantInt::get(IntptrTy, 0);
    Initializers[i] = ConstantStruct::get(
        GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(ModuleName, IntptrTy),
        ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr);
    if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;
    DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
  ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
  GlobalVariable *AllGlobals = new GlobalVariable(
      M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
      ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
  // Create calls for poisoning before initializers run and unpoisoning after.
  if (HasDynamicallyInitializedGlobals)
    createInitializerPoisonCalls(M, ModuleName);
  IRB.CreateCall(AsanRegisterGlobals,
                 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
                  ConstantInt::get(IntptrTy, n)});
  // We also need to unregister globals at the end, e.g. when a shared library
  Function *AsanDtorFunction =
      Function::Create(FunctionType::get(Type::getVoidTy(*C), false),
                       GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
  BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
  IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
  IRB_Dtor.CreateCall(AsanUnregisterGlobals,
                      {IRB.CreatePointerCast(AllGlobals, IntptrTy),
                       ConstantInt::get(IntptrTy, n)});
  appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
bool AddressSanitizerModule::runOnModule(Module &M) {
  C = &(M.getContext());
  int LongSize = M.getDataLayout().getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);
  TargetTriple = Triple(M.getTargetTriple());
  Mapping = getShadowMapping(TargetTriple, LongSize);
  initializeCallbacks(M);
  bool Changed = false;
  Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
  IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
  if (ClGlobals) Changed |= InstrumentGlobals(IRB, M);
void AddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  // IsWrite, TypeSize and Exp are encoded in the function name.
  for (int Exp = 0; Exp < 2; Exp++) {
    for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
      const std::string TypeStr = AccessIsWrite ? "store" : "load";
      const std::string ExpStr = Exp ? "exp_" : "";
      const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
      AsanErrorCallbackSized[AccessIsWrite][Exp] =
          checkSanitizerInterfaceFunction(M.getOrInsertFunction(
              kAsanReportErrorTemplate + ExpStr + TypeStr + "_n",
              IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
      AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] =
          checkSanitizerInterfaceFunction(M.getOrInsertFunction(
              ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N",
              IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
      for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
           AccessSizeIndex++) {
        const std::string Suffix = TypeStr + itostr(1 << AccessSizeIndex);
        AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            checkSanitizerInterfaceFunction(M.getOrInsertFunction(
                kAsanReportErrorTemplate + ExpStr + Suffix, IRB.getVoidTy(),
                IntptrTy, ExpType, nullptr));
        AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
            checkSanitizerInterfaceFunction(M.getOrInsertFunction(
                ClMemoryAccessCallbackPrefix + ExpStr + Suffix, IRB.getVoidTy(),
                IntptrTy, ExpType, nullptr));
  AsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
  AsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
  AsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr));
  AsanHandleNoReturnFunc = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr));
  AsanPtrCmpFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  AsanPtrSubFunction = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  // We insert an empty inline asm after __asan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
bool AddressSanitizer::doInitialization(Module &M) {
  // Initialize the private fields. No one has accessed them before.
  C = &(M.getContext());
  LongSize = M.getDataLayout().getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);
  TargetTriple = Triple(M.getTargetTriple());
  std::tie(AsanCtorFunction, AsanInitFunction) =
      createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, kAsanInitName,
                                          /*InitArgTypes=*/{},
  Mapping = getShadowMapping(TargetTriple, LongSize);
  appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __asan_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().find(" load]") != std::string::npos) {
    IRBuilder<> IRB(F.begin()->begin());
    IRB.CreateCall(AsanInitFunction, {});
bool AddressSanitizer::runOnFunction(Function &F) {
  if (&F == AsanCtorFunction) return false;
  if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
  DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
  initializeCallbacks(*F.getParent());
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  // If needed, insert __asan_init before checking for SanitizeAddress attr.
  maybeInsertAsanInitAtFunctionEntry(F);
  if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return false;
  if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false;
  // We want to instrument every address only once per basic block (unless there
  // are calls between uses).
  SmallSet<Value *, 16> TempsToInstrument;
  SmallVector<Instruction *, 16> ToInstrument;
  SmallVector<Instruction *, 8> NoReturnCalls;
  SmallVector<BasicBlock *, 16> AllBlocks;
  SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
1484 // Fill the set of memory operations to instrument.
1485 for (auto &BB : F) {
1486 AllBlocks.push_back(&BB);
1487 TempsToInstrument.clear();
1488 int NumInsnsPerBB = 0;
1489 for (auto &Inst : BB) {
1490 if (LooksLikeCodeInBug11395(&Inst)) return false;
1491 if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
1493 if (ClOpt && ClOptSameTemp) {
1494 if (!TempsToInstrument.insert(Addr).second)
1495 continue; // We've seen this temp in the current BB.
1497 } else if (ClInvalidPointerPairs &&
1498 isInterestingPointerComparisonOrSubtraction(&Inst)) {
1499 PointerComparisonsOrSubtracts.push_back(&Inst);
1501 } else if (isa<MemIntrinsic>(Inst)) {
1504 if (isa<AllocaInst>(Inst)) NumAllocas++;
1507 // A call inside BB.
1508 TempsToInstrument.clear();
1509 if (CS.doesNotReturn()) NoReturnCalls.push_back(CS.getInstruction());
1513 ToInstrument.push_back(&Inst);
1515 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
1519 bool UseCalls = false;
1520 if (ClInstrumentationWithCallsThreshold >= 0 &&
1521 ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
1524 const TargetLibraryInfo *TLI =
1525 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1526 const DataLayout &DL = F.getParent()->getDataLayout();
1527 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(),
1528 /*RoundToAlign=*/true);
1531 int NumInstrumented = 0;
1532 for (auto Inst : ToInstrument) {
1533 if (ClDebugMin < 0 || ClDebugMax < 0 ||
1534 (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
1535 if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
1536 instrumentMop(ObjSizeVis, Inst, UseCalls,
1537 F.getParent()->getDataLayout());
1538 else
1539 instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
1540 }
1541 NumInstrumented++;
1542 }
1544 FunctionStackPoisoner FSP(F, *this);
1545 bool ChangedStack = FSP.runOnFunction();
1547 // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
1548 // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
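// For example (illustrative), before a call such as
//   call void @exit(i32 1) noreturn
// the loop below inserts
//   call void @__asan_handle_no_return()
// so the frame's redzones do not remain poisoned after the non-local exit.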
1549 for (auto CI : NoReturnCalls) {
1550 IRBuilder<> IRB(CI);
1551 IRB.CreateCall(AsanHandleNoReturnFunc, {});
1552 }
1554 for (auto Inst : PointerComparisonsOrSubtracts) {
1555 instrumentPointerComparisonOrSubtraction(Inst);
1556 NumInstrumented++;
1557 }
1559 bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
1561 DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");
1563 return res;
1564 }
1566 // Workaround for bug 11395: we don't want to instrument stack in functions
1567 // with large assembly blobs (32-bit only), otherwise reg alloc may crash.
1568 // FIXME: remove once bug 11395 is fixed.
1569 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
1570 if (LongSize != 32) return false;
1571 CallInst *CI = dyn_cast<CallInst>(I);
1572 if (!CI || !CI->isInlineAsm()) return false;
1573 if (CI->getNumArgOperands() <= 5) return false;
1574 // We have inline assembly with quite a few arguments.
1575 return true;
1576 }
1578 void FunctionStackPoisoner::initializeCallbacks(Module &M) {
1579 IRBuilder<> IRB(*C);
1580 for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
1581 std::string Suffix = itostr(i);
1582 AsanStackMallocFunc[i] = checkSanitizerInterfaceFunction(
1583 M.getOrInsertFunction(kAsanStackMallocNameTemplate + Suffix, IntptrTy,
1584 IntptrTy, nullptr));
1585 AsanStackFreeFunc[i] = checkSanitizerInterfaceFunction(
1586 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
1587 IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1588 }
1589 AsanPoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
1590 M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
1591 IntptrTy, IntptrTy, nullptr));
1592 AsanUnpoisonStackMemoryFunc = checkSanitizerInterfaceFunction(
1593 M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
1594 IntptrTy, IntptrTy, nullptr));
1595 AsanAllocaPoisonFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
1596 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1597 AsanAllocasUnpoisonFunc =
1598 checkSanitizerInterfaceFunction(M.getOrInsertFunction(
1599 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1600 }
1602 void FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
1603 IRBuilder<> &IRB, Value *ShadowBase,
1604 bool DoPoison) {
1605 size_t n = ShadowBytes.size();
1606 size_t i = 0;
1607 // We need to (un)poison n bytes of stack shadow. Poison as many as we can
1608 // using 64-bit stores (if we are on 64-bit arch), then poison the rest
1609 // with 32-bit stores, then with 16-bit stores, then with 8-bit stores.
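// Worked example (illustrative): with n == 13 on a 64-bit target this emits
// one 8-byte store (bytes 0-7), one 4-byte store (bytes 8-11) and one 1-byte
// store (byte 12); the 2-byte step is skipped because only one byte remains.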
1610 for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
1611 LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
1612 for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
1613 uint64_t Val = 0;
1614 for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
1615 if (F.getParent()->getDataLayout().isLittleEndian())
1616 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
1617 else
1618 Val = (Val << 8) | ShadowBytes[i + j];
1619 }
1621 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
1622 Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
1623 Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
1624 IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
1625 }
1626 }
1627 }
1629 // Fake stack allocator (asan_fake_stack.h) has 11 size classes
1630 // for every power of 2 from kMinStackMallocSize up to kMaxStackMallocSize (the largest class index is kMaxAsanStackMallocSizeClass).
1631 static int StackMallocSizeClass(uint64_t LocalStackSize) {
1632 assert(LocalStackSize <= kMaxStackMallocSize);
1633 uint64_t MaxSize = kMinStackMallocSize;
1634 for (int i = 0;; i++, MaxSize *= 2)
1635 if (LocalStackSize <= MaxSize) return i;
1636 llvm_unreachable("impossible LocalStackSize");
1637 }
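// Worked example (illustrative): LocalStackSize == 100 falls into class 1
// (<= 128 bytes); 64 is class 0 and 65536 (kMaxStackMallocSize) is class 10,
// i.e. kMaxAsanStackMallocSizeClass.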
1639 // Set Size bytes starting from ShadowBase to kAsanStackAfterReturnMagic.
1640 // We cannot use the memset intrinsic because it may end up calling the actual
1641 // memset. Size is a multiple of 8.
1642 // Currently this generates 8-byte stores on x86_64; it may be better to
1643 // generate wider stores.
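// Sketch of the emitted code (illustrative), for Size == 16:
//   store i64 0xf5f5f5f5f5f5f5f5, i64* inttoptr(ShadowBase)
//   store i64 0xf5f5f5f5f5f5f5f5, i64* inttoptr(ShadowBase + 8)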
1644 void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
1645 IRBuilder<> &IRB, Value *ShadowBase, int Size) {
1646 assert(!(Size % 8));
1648 // kAsanStackAfterReturnMagic is 0xf5.
1649 const uint64_t kAsanStackAfterReturnMagic64 = 0xf5f5f5f5f5f5f5f5ULL;
1651 for (int i = 0; i < Size; i += 8) {
1652 Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
1653 IRB.CreateStore(
1654 ConstantInt::get(IRB.getInt64Ty(), kAsanStackAfterReturnMagic64),
1655 IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
1656 }
1657 }
1659 static DebugLoc getFunctionEntryDebugLocation(Function &F) {
1660 for (const auto &Inst : F.getEntryBlock())
1661 if (!isa<AllocaInst>(Inst)) return Inst.getDebugLoc();
1662 return DebugLoc();
1663 }
1665 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
1666 Value *ValueIfTrue,
1667 Instruction *ThenTerm,
1668 Value *ValueIfFalse) {
1669 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
1670 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
1671 PHI->addIncoming(ValueIfFalse, CondBlock);
1672 BasicBlock *ThenBlock = ThenTerm->getParent();
1673 PHI->addIncoming(ValueIfTrue, ThenBlock);
1674 return PHI;
1675 }
1677 Value *FunctionStackPoisoner::createAllocaForLayout(
1678 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
1679 AllocaInst *Alloca;
1680 if (Dynamic) {
1681 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
1682 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
1683 "MyAlloca");
1684 } else {
1685 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
1686 nullptr, "MyAlloca");
1687 assert(Alloca->isStaticAlloca());
1688 }
1689 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
1690 size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
1691 Alloca->setAlignment(FrameAlignment);
1692 return IRB.CreatePointerCast(Alloca, IntptrTy);
1693 }
1695 void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
1696 BasicBlock &FirstBB = *F.begin();
1697 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
1698 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
1699 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
1700 DynamicAllocaLayout->setAlignment(32);
1701 }
1703 void FunctionStackPoisoner::poisonStack() {
1704 assert(AllocaVec.size() > 0 || DynamicAllocaVec.size() > 0);
1706 if (ClInstrumentAllocas && DynamicAllocaVec.size() > 0) {
1707 // Handle dynamic allocas.
1708 createDynamicAllocasInitStorage();
1709 for (auto &AI : DynamicAllocaVec)
1710 handleDynamicAllocaCall(AI);
1712 unpoisonDynamicAllocas();
1713 }
1715 if (AllocaVec.size() == 0) return;
1717 int StackMallocIdx = -1;
1718 DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);
1720 Instruction *InsBefore = AllocaVec[0];
1721 IRBuilder<> IRB(InsBefore);
1722 IRB.SetCurrentDebugLocation(EntryDebugLocation);
1724 SmallVector<ASanStackVariableDescription, 16> SVD;
1725 SVD.reserve(AllocaVec.size());
1726 for (AllocaInst *AI : AllocaVec) {
1727 ASanStackVariableDescription D = {AI->getName().data(),
1728 ASan.getAllocaSizeInBytes(AI),
1729 AI->getAlignment(), AI, 0};
1730 SVD.push_back(D);
1731 }
1732 // Minimal header size (left redzone) is 4 pointers,
1733 // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
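// Illustrative 64-bit frame produced below (one 8-byte local "a"):
//   [ left redzone >= 32 bytes | a (8 bytes) | right redzone ]
// where the left redzone holds the frame magic, a pointer to the frame
// description string and the function PC, written at redzone[0..2] below.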
1734 size_t MinHeaderSize = ASan.LongSize / 2;
1735 ASanStackFrameLayout L;
1736 ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
1737 DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
1738 uint64_t LocalStackSize = L.FrameSize;
1739 bool DoStackMalloc =
1740 ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
1741 // Don't do dynamic alloca in presence of inline asm: too often it makes
1742 // assumptions on which registers are available. Don't do stack malloc in the
1743 // presence of inline asm on 32-bit platforms for the same reason.
1744 bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
1745 DoStackMalloc &= !HasNonEmptyInlineAsm || ASan.LongSize != 32;
1747 Value *StaticAlloca =
1748 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
1750 Value *FakeStack;
1751 Value *LocalStackBase;
1753 if (DoStackMalloc) {
1754 // void *FakeStack = __asan_option_detect_stack_use_after_return
1755 // ? __asan_stack_malloc_N(LocalStackSize)
1756 // : nullptr;
1757 // void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
1758 Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal(
1759 kAsanOptionDetectUAR, IRB.getInt32Ty());
1760 Value *UARIsEnabled =
1761 IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
1762 Constant::getNullValue(IRB.getInt32Ty()));
1763 Instruction *Term =
1764 SplitBlockAndInsertIfThen(UARIsEnabled, InsBefore, false);
1765 IRBuilder<> IRBIf(Term);
1766 IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
1767 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
1768 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
1769 Value *FakeStackValue =
1770 IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
1771 ConstantInt::get(IntptrTy, LocalStackSize));
1772 IRB.SetInsertPoint(InsBefore);
1773 IRB.SetCurrentDebugLocation(EntryDebugLocation);
1774 FakeStack = createPHI(IRB, UARIsEnabled, FakeStackValue, Term,
1775 ConstantInt::get(IntptrTy, 0));
1777 Value *NoFakeStack =
1778 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
1779 Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
1780 IRBIf.SetInsertPoint(Term);
1781 IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
1782 Value *AllocaValue =
1783 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
1784 IRB.SetInsertPoint(InsBefore);
1785 IRB.SetCurrentDebugLocation(EntryDebugLocation);
1786 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
1787 } else {
1788 // void *FakeStack = nullptr;
1789 // void *LocalStackBase = alloca(LocalStackSize);
1790 FakeStack = ConstantInt::get(IntptrTy, 0);
1791 LocalStackBase =
1792 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
1793 }
1795 // Insert poison calls for lifetime intrinsics for alloca.
1796 bool HavePoisonedAllocas = false;
1797 for (const auto &APC : AllocaPoisonCallVec) {
1798 assert(APC.InsBefore);
1800 IRBuilder<> IRB(APC.InsBefore);
1801 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
1802 HavePoisonedAllocas |= APC.DoPoison;
1803 }
1805 // Replace Alloca instructions with base+offset.
1806 for (const auto &Desc : SVD) {
1807 AllocaInst *AI = Desc.AI;
1808 Value *NewAllocaPtr = IRB.CreateIntToPtr(
1809 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
1810 AI->getType());
1811 replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true);
1812 AI->replaceAllUsesWith(NewAllocaPtr);
1813 }
1815 // The left-most redzone has enough space for at least 4 pointers.
1816 // Write the Magic value to redzone[0].
1817 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
1818 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
1819 BasePlus0);
1820 // Write the frame description constant to redzone[1].
1821 Value *BasePlus1 = IRB.CreateIntToPtr(
1822 IRB.CreateAdd(LocalStackBase,
1823 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
1824 IntptrPtrTy);
1825 GlobalVariable *StackDescriptionGlobal =
1826 createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
1827 /*AllowMerging*/ true);
1828 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
1829 IRB.CreateStore(Description, BasePlus1);
1830 // Write the PC to redzone[2].
1831 Value *BasePlus2 = IRB.CreateIntToPtr(
1832 IRB.CreateAdd(LocalStackBase,
1833 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
1834 IntptrPtrTy);
1835 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
1837 // Poison the stack redzones at the entry.
1838 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
1839 poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);
1841 // (Un)poison the stack before all ret instructions.
1842 for (auto Ret : RetVec) {
1843 IRBuilder<> IRBRet(Ret);
1844 // Mark the current frame as retired.
1845 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
1846 BasePlus0);
1847 if (DoStackMalloc) {
1848 assert(StackMallocIdx >= 0);
1849 // if FakeStack != 0 // LocalStackBase == FakeStack
1850 // // In use-after-return mode, poison the whole stack frame.
1851 // if StackMallocIdx <= 4
1852 // // For small sizes inline the whole thing:
1853 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
1854 // **SavedFlagPtr(FakeStack) = 0
1855 // else
1856 // __asan_stack_free_N(FakeStack, LocalStackSize)
1857 // else
1858 // <This is not a fake stack; unpoison the redzones>
1859 Value *Cmp =
1860 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
1861 TerminatorInst *ThenTerm, *ElseTerm;
1862 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
1864 IRBuilder<> IRBPoison(ThenTerm);
1865 if (StackMallocIdx <= 4) {
1866 int ClassSize = kMinStackMallocSize << StackMallocIdx;
1867 SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
1868 ClassSize >> Mapping.Scale);
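// Illustrative numbers: StackMallocIdx == 4 gives ClassSize == 64 << 4 == 1024
// bytes, so with Mapping.Scale == 3 the inlined poisoning covers
// 1024 >> 3 == 128 shadow bytes (sixteen 8-byte stores of 0xf5).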
1869 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
1870 FakeStack,
1871 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
1872 Value *SavedFlagPtr = IRBPoison.CreateLoad(
1873 IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
1874 IRBPoison.CreateStore(
1875 Constant::getNullValue(IRBPoison.getInt8Ty()),
1876 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
1877 } else {
1878 // For larger frames call __asan_stack_free_*.
1879 IRBPoison.CreateCall(
1880 AsanStackFreeFunc[StackMallocIdx],
1881 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
1882 }
1884 IRBuilder<> IRBElse(ElseTerm);
1885 poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
1886 } else if (HavePoisonedAllocas) {
1887 // If we poisoned some allocas in llvm.lifetime analysis,
1888 // unpoison whole stack frame now.
1889 poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
1890 } else {
1891 poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
1892 }
1893 }
1895 // We are done. Remove the old unused alloca instructions.
1896 for (auto AI : AllocaVec) AI->eraseFromParent();
1897 }
1899 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
1900 IRBuilder<> &IRB, bool DoPoison) {
1901 // For now just insert the call to ASan runtime.
1902 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
1903 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
1904 IRB.CreateCall(DoPoison ? AsanPoisonStackMemoryFunc
1905 : AsanUnpoisonStackMemoryFunc,
1906 {AddrArg, SizeArg});
1907 }
1909 // Handling llvm.lifetime intrinsics for a given %alloca:
1910 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
1911 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
1912 // invalid accesses) and unpoison it for llvm.lifetime.start (the memory
1913 // could be poisoned by previous llvm.lifetime.end instruction, as the
1914 // variable may go in and out of scope several times, e.g. in loops).
1915 // (3) if we poisoned at least one %alloca in a function,
1916 // unpoison the whole stack frame at function exit.
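// Illustrative IR for step (2) (a sketch; names are hypothetical):
//   %buf = alloca [64 x i8]
//   call void @llvm.lifetime.start(i64 64, i8* %p)
//     ; -> call @__asan_unpoison_stack_memory(%buf, 64)
//   call void @llvm.lifetime.end(i64 64, i8* %p)
//     ; -> call @__asan_poison_stack_memory(%buf, 64)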
1918 AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
1919 if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
1920 // We're interested only in allocas we can handle.
1921 return ASan.isInterestingAlloca(*AI) ? AI : nullptr;
1922 // See if we've already calculated (or started to calculate) alloca for a
1923 // given value.
1924 AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
1925 if (I != AllocaForValue.end()) return I->second;
1926 // Store 0 while we're calculating alloca for value V to avoid
1927 // infinite recursion if the value references itself.
1928 AllocaForValue[V] = nullptr;
1929 AllocaInst *Res = nullptr;
1930 if (CastInst *CI = dyn_cast<CastInst>(V))
1931 Res = findAllocaForValue(CI->getOperand(0));
1932 else if (PHINode *PN = dyn_cast<PHINode>(V)) {
1933 for (Value *IncValue : PN->incoming_values()) {
1934 // Allow self-referencing phi-nodes.
1935 if (IncValue == PN) continue;
1936 AllocaInst *IncValueAI = findAllocaForValue(IncValue);
1937 // AI for incoming values should exist and should all be equal.
1938 if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
1939 return nullptr;
1940 Res = IncValueAI;
1941 }
1942 }
1943 if (Res) AllocaForValue[V] = Res;
1944 return Res;
1945 }
1947 void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
1948 IRBuilder<> IRB(AI);
1950 const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
1951 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
1953 Value *Zero = Constant::getNullValue(IntptrTy);
1954 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
1955 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
1957 // We need to extend the alloca with additional memory to place the
1958 // redzones. OldSize is the number of allocated elements, each of
1959 // ElementSize bytes, so the allocated memory size in bytes is
1960 // OldSize * ElementSize.
1961 const unsigned ElementSize =
1962 F.getParent()->getDataLayout().getTypeAllocSize(AI->getAllocatedType());
1963 Value *OldSize =
1964 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
1965 ConstantInt::get(IntptrTy, ElementSize));
1967 // PartialSize = OldSize % 32
1968 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
1970 // Misalign = kAllocaRzSize - PartialSize;
1971 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
1973 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
1974 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
1975 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
1977 // AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize
1978 // Align is added to locate left redzone, PartialPadding for possible
1979 // partial redzone and kAllocaRzSize for right redzone respectively.
1980 Value *AdditionalChunkSize = IRB.CreateAdd(
1981 ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding);
1983 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
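// Worked example (illustrative), with kAllocaRzSize == 32 and Align == 32:
// OldSize == 100 gives PartialSize == 4, Misalign == 28, PartialPadding == 28,
// AdditionalChunkSize == 32 + 32 + 28 == 92 and NewSize == 192, which is a
// multiple of 32 and leaves room for the left, partial and right redzones.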
1985 // Insert new alloca with new NewSize and Align params.
1986 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
1987 NewAlloca->setAlignment(Align);
1989 // NewAddress = Address + Align
1990 Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
1991 ConstantInt::get(IntptrTy, Align));
1993 // Insert __asan_alloca_poison call for the newly created alloca.
1994 IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
1996 // Store the last alloca's address to DynamicAllocaLayout. We'll need this
1997 // for unpoisoning the dynamic allocas when the function returns.
1998 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
2000 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
2002 // Replace all uses of AddressReturnedByAlloca with NewAddressPtr.
2003 AI->replaceAllUsesWith(NewAddressPtr);
2005 // We are done. Erase old alloca from parent.
2006 AI->eraseFromParent();
2007 }
2009 // isSafeAccess returns true if Addr is always inbounds with respect to its
2010 // base object. For example, it is a field access or an array access with
2011 // constant inbounds index.
2012 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
2013 Value *Addr, uint64_t TypeSize) const {
2014 SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
2015 if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
2016 uint64_t Size = SizeOffset.first.getZExtValue();
2017 int64_t Offset = SizeOffset.second.getSExtValue();
2018 // Three checks are required to ensure safety:
2019 // . Offset >= 0 (since the offset is given from the base ptr)
2020 // . Size >= Offset (unsigned)
2021 // . Size - Offset >= NeededSize (unsigned)
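// Worked example (illustrative): for a 4-byte load of the second field of
// "struct { int a; int b; }" we get Size == 8, Offset == 4 and
// TypeSize / 8 == 4, so all three checks hold and the access is safe.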
2022 return Offset >= 0 && Size >= uint64_t(Offset) &&
2023 Size - uint64_t(Offset) >= TypeSize / 8;
2024 }