//===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
// Details of the algorithm:
//   http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <string>
#include <system_error>

using namespace llvm;
#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000;  // < 2G.
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;

static const size_t kMinStackMallocSize = 1 << 6;   // 64B
static const size_t kMaxStackMallocSize = 1 << 16;  // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

static const char *const kAsanModuleCtorName = "asan.module_ctor";
static const char *const kAsanModuleDtorName = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
static const char *const kAsanReportErrorTemplate = "__asan_report_";
static const char *const kAsanReportLoadN = "__asan_report_load_n";
static const char *const kAsanReportStoreN = "__asan_report_store_n";
static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
static const char *const kAsanUnregisterGlobalsName =
    "__asan_unregister_globals";
static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
static const char *const kAsanInitName = "__asan_init_v5";
static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
static const char *const kAsanGenPrefix = "__asan_gen_";
static const char *const kSanCovGenPrefix = "__sancov_gen_";
static const char *const kAsanPoisonStackMemoryName =
    "__asan_poison_stack_memory";
static const char *const kAsanUnpoisonStackMemoryName =
    "__asan_unpoison_stack_memory";

static const char *const kAsanOptionDetectUAR =
    "__asan_option_detect_stack_use_after_return";

static const int kAsanStackAfterReturnMagic = 0xf5;

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const unsigned kAllocaRzSize = 32;
static const unsigned kAsanAllocaLeftMagic = 0xcacacacaU;
static const unsigned kAsanAllocaRightMagic = 0xcbcbcbcbU;
static const unsigned kAsanAllocaPartialVal1 = 0xcbcbcb00U;
static const unsigned kAsanAllocaPartialVal2 = 0x000000cbU;
// Command-line flags.

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));
static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));
// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);
// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
                                      cl::desc("Check stack-use-after-return"),
                                      cl::Hidden, cl::init(true));
// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));
static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));
static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));
static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));
static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented contains more than "
        "this number of memory accesses, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));
static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));
static cl::opt<bool> ClInstrumentAllocas("asan-instrument-allocas",
                                         cl::desc("instrument dynamic allocas"),
                                         cl::Hidden, cl::init(false));
static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

// These flags allow changing the shadow mapping.
// The shadow mapping looks like
//    Shadow = (Mem >> scale) + (1 << offset_log)
static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.
static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));
static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClCheckLifetime(
    "asan-check-lifetime",
    cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(false));

// Debug flags.
static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));
static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));
static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));
static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));
static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumInstrumentedDynamicAllocas,
          "Number of instrumented dynamic allocas");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {
/// Frontend-provided metadata for source location.
struct LocationMetadata {
  StringRef Filename;
  int LineNo;
  int ColumnNo;

  LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {}

  bool empty() const { return Filename.empty(); }

  void parse(MDNode *MDN) {
    assert(MDN->getNumOperands() == 3);
    MDString *MDFilename = cast<MDString>(MDN->getOperand(0));
    Filename = MDFilename->getString();
    LineNo =
        mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
    ColumnNo =
        mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
  }
};
/// Frontend-provided metadata for global variables.
class GlobalsMetadata {
 public:
  struct Entry {
    Entry() : SourceLoc(), Name(), IsDynInit(false), IsBlacklisted(false) {}
    LocationMetadata SourceLoc;
    StringRef Name;
    bool IsDynInit;
    bool IsBlacklisted;
  };

  GlobalsMetadata() : inited_(false) {}

  void init(Module &M) {
    assert(!inited_);
    inited_ = true;
    NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
    if (!Globals) return;
    for (auto MDN : Globals->operands()) {
      // Metadata node contains the global and the fields of "Entry".
      assert(MDN->getNumOperands() == 5);
      auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0));
      // The optimizer may optimize away a global entirely.
      if (!GV) continue;
      // We can already have an entry for GV if it was merged with another
      // global.
      Entry &E = Entries[GV];
      if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
        E.SourceLoc.parse(Loc);
      if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
        E.Name = Name->getString();
      ConstantInt *IsDynInit =
          mdconst::extract<ConstantInt>(MDN->getOperand(3));
      E.IsDynInit |= IsDynInit->isOne();
      ConstantInt *IsBlacklisted =
          mdconst::extract<ConstantInt>(MDN->getOperand(4));
      E.IsBlacklisted |= IsBlacklisted->isOne();
    }
  }

  /// Returns metadata entry for a given global.
  Entry get(GlobalVariable *G) const {
    auto Pos = Entries.find(G);
    return (Pos != Entries.end()) ? Pos->second : Entry();
  }

 private:
  bool inited_;
  DenseMap<GlobalVariable *, Entry> Entries;
};
/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
};

static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
  bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
  bool IsIOS = TargetTriple.isiOS();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
                 TargetTriple.getArch() == llvm::Triple::ppc64le;
  bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
  bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
                  TargetTriple.getArch() == llvm::Triple::mipsel;
  bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
                  TargetTriple.getArch() == llvm::Triple::mips64el;
  bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64;
  bool IsWindows = TargetTriple.isOSWindows();

  ShadowMapping Mapping;

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = 0;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kIOSShadowOffset32;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else {  // LongSize == 64
    if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset64;
    else if (IsLinux && IsX86_64)
      Mapping.Offset = kSmallX86_64ShadowOffset;
    else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale) {
    Mapping.Scale = ClMappingScale;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) when the
  // offset is a power of two, but on ppc64 we have to use add since the
  // shadow offset is not necessarily 1/8-th of the address space.
  Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1));

  return Mapping;
}
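// Worked example of the mapping above (illustrative): on Linux/x86_64,
// Scale == 3 and Offset == kSmallX86_64ShadowOffset == 0x7fff8000, so the
// shadow of Addr == 0x10000000 is (0x10000000 >> 3) + 0x7fff8000 ==
// 0x81ff8000. 0x7fff8000 is not a power of two, so an ADD is emitted; with
// kDefaultShadowOffset64 == 1ULL << 44 the OR form would be used instead.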
static size_t RedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
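// E.g. RedzoneSizeForScale(3) == 32 (1 << 3 == 8, clamped up to 32 bytes),
// while RedzoneSizeForScale(7) == 128 bytes.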
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
  AddressSanitizer() : FunctionPass(ID) {
    initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
  }
  const char *getPassName() const override {
    return "AddressSanitizerFunctionPass";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<DataLayoutPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
  uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
    Type *Ty = AI->getAllocatedType();
    uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
    return SizeInBytes;
  }
  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(AllocaInst &AI) const;
  /// If it is an interesting memory access, return the PointerOperand
  /// and set IsWrite/Alignment. Otherwise return nullptr.
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize,
                                   unsigned *Alignment) const;
  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
                     bool UseCalls);
  void instrumentPointerComparisonOrSubtraction(Instruction *I);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, uint32_t TypeSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool runOnFunction(Function &F) override;
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid

  DominatorTree &getDominatorTree() const { return *DT; }

 private:
  void initializeCallbacks(Module &M);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    uint64_t TypeSize) const;

  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  Type *IntptrTy;
  ShadowMapping Mapping;
  DominatorTree *DT;
  Function *AsanCtorFunction;
  Function *AsanInitFunction;
  Function *AsanHandleNoReturnFunc;
  Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
  // This array is indexed by AccessIsWrite and log2(AccessSize).
  Function *AsanErrorCallback[2][kNumberOfAccessSizes];
  Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
  // This array is indexed by AccessIsWrite.
  Function *AsanErrorCallbackSized[2], *AsanMemoryAccessCallbackSized[2];
  Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
  InlineAsm *EmptyAsm;
  GlobalsMetadata GlobalsMD;

  friend struct FunctionStackPoisoner;
};
class AddressSanitizerModule : public ModulePass {
 public:
  AddressSanitizerModule() : ModulePass(ID) {}
  bool runOnModule(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid
  const char *getPassName() const override { return "AddressSanitizerModule"; }

 private:
  void initializeCallbacks(Module &M);

  bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
  bool ShouldInstrumentGlobal(GlobalVariable *G);
  void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
  void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
  size_t MinRedzoneSizeForGlobal() const {
    return RedzoneSizeForScale(Mapping.Scale);
  }

  GlobalsMetadata GlobalsMD;
  Type *IntptrTy;
  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  ShadowMapping Mapping;
  Function *AsanPoisonGlobals;
  Function *AsanUnpoisonGlobals;
  Function *AsanRegisterGlobals;
  Function *AsanUnregisterGlobals;
};
// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<Instruction *, 8> RetVec;
  unsigned StackAlignment;

  Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;

  // Stores left and right redzone shadow addresses for dynamic alloca
  // and pointer to alloca instruction itself.
  // LeftRzAddr is a shadow address for alloca left redzone.
  // RightRzAddr is a shadow address for alloca right redzone.
  struct DynamicAllocaCall {
    AllocaInst *AI;
    Value *LeftRzAddr;
    Value *RightRzAddr;
    bool Poison;
    explicit DynamicAllocaCall(AllocaInst *AI, Value *LeftRzAddr = nullptr,
                               Value *RightRzAddr = nullptr)
        : AI(AI),
          LeftRzAddr(LeftRzAddr),
          RightRzAddr(RightRzAddr),
          Poison(true) {}
  };
  SmallVector<DynamicAllocaCall, 1> DynamicAllocaVec;

  // Maps Value to an AllocaInst from which the Value originates.
  typedef DenseMap<Value *, AllocaInst *> AllocaForValueMapTy;
  AllocaForValueMapTy AllocaForValue;

  bool HasNonEmptyInlineAsm;
  std::unique_ptr<CallInst> EmptyInlineAsm;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
      : F(F),
        ASan(ASan),
        DIB(*F.getParent(), /*AllowUnresolved*/ false),
        C(ASan.C),
        IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy, 0)),
        Mapping(ASan.Mapping),
        StackAlignment(1 << Mapping.Scale),
        HasNonEmptyInlineAsm(false),
        EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
  bool runOnFunction() {
    if (!ClStack) return false;
    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    poisonStack();

    if (ClDebugStack) {
      DEBUG(dbgs() << F);
    }
    return true;
  }

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void poisonStack();

  // ----------------------- Visitors.
  /// \brief Collect all Ret instructions.
  void visitReturnInst(ReturnInst &RI) { RetVec.push_back(&RI); }
  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAlloca(DynamicAllocaCall &AllocaCall) {
    if (!AllocaCall.Poison) return;
    for (auto Ret : RetVec) {
      IRBuilder<> IRBRet(Ret);
      PointerType *Int32PtrTy = PointerType::getUnqual(IRBRet.getInt32Ty());
      Value *Zero = Constant::getNullValue(IRBRet.getInt32Ty());
      Value *PartialRzAddr = IRBRet.CreateSub(AllocaCall.RightRzAddr,
                                              ConstantInt::get(IntptrTy, 4));
      IRBRet.CreateStore(
          Zero, IRBRet.CreateIntToPtr(AllocaCall.LeftRzAddr, Int32PtrTy));
      IRBRet.CreateStore(Zero,
                         IRBRet.CreateIntToPtr(PartialRzAddr, Int32PtrTy));
      IRBRet.CreateStore(
          Zero, IRBRet.CreateIntToPtr(AllocaCall.RightRzAddr, Int32PtrTy));
    }
  }

  // Right shift for BigEndian and left shift for LittleEndian.
  Value *shiftAllocaMagic(Value *Val, IRBuilder<> &IRB, Value *Shift) {
    return ASan.DL->isLittleEndian() ? IRB.CreateShl(Val, Shift)
                                     : IRB.CreateLShr(Val, Shift);
  }
  // Compute PartialRzMagic for dynamic alloca call. Since we don't know the
  // size of requested memory until runtime, we should compute it dynamically.
  // If PartialSize is 0, PartialRzMagic would contain kAsanAllocaRightMagic,
  // otherwise it would contain the value that we will use to poison the
  // partial redzone for alloca call.
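  // For instance (illustrative, assuming a little-endian target): if
  // PartialSize == 1, the first byte of the 4-byte partial-redzone word must
  // stay addressable while the upper three bytes are poisoned with 0xcb,
  // which corresponds to kAsanAllocaPartialVal1 == 0xcbcbcb00.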
  Value *computePartialRzMagic(Value *PartialSize, IRBuilder<> &IRB);
  // Deploy and poison redzones around dynamic alloca call. To do this, we
  // should replace this call with another one with changed parameters and
  // replace all its uses with new address, so
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make new memory allocation contain not only
  // requested memory, but also left, partial and right redzones.
  // After that, we should poison redzones:
  // (1) Left redzone with kAsanAllocaLeftMagic.
  // (2) Partial redzone with the value computed at runtime by the
  //     computePartialRzMagic function.
  // (3) Right redzone with kAsanAllocaRightMagic.
  void handleDynamicAllocaCall(DynamicAllocaCall &AllocaCall);
  /// \brief Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    if (!ASan.isInterestingAlloca(AI)) return;

    StackAlignment = std::max(StackAlignment, AI.getAlignment());
    if (isDynamicAlloca(AI))
      DynamicAllocaVec.push_back(DynamicAllocaCall(&AI));
    else
      AllocaVec.push_back(&AI);
  }
  /// \brief Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    if (!ClCheckLifetime) return;
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID != Intrinsic::lifetime_start && ID != Intrinsic::lifetime_end)
      return;
    // Found lifetime intrinsic, add ASan instrumentation if necessary.
    ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
    // If size argument is undefined, don't do anything.
    if (Size->isMinusOne()) return;
    // Check that size doesn't saturate uint64_t and can
    // be stored in IntptrTy.
    const uint64_t SizeValue = Size->getValue().getLimitedValue();
    if (SizeValue == ~0ULL ||
        !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
    if (!AI) return;
    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
    AllocaPoisonCallVec.push_back(APC);
  }
  void visitCallInst(CallInst &CI) {
    HasNonEmptyInlineAsm |=
        CI.isInlineAsm() && !CI.isIdenticalTo(EmptyInlineAsm.get());
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  bool doesDominateAllExits(const Instruction *I) const {
    for (auto Ret : RetVec) {
      if (!ASan.getDominatorTree().dominates(I, Ret)) return false;
    }
    return true;
  }

  bool isDynamicAlloca(AllocaInst &AI) const {
    return AI.isArrayAllocation() || !AI.isStaticAlloca();
  }
  /// Finds alloca where the value comes from.
  AllocaInst *findAllocaForValue(Value *V);
  void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
                      Value *ShadowBase, bool DoPoison);
  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  void SetShadowToStackAfterReturnInlined(IRBuilder<> &IRB, Value *ShadowBase,
                                          int Size);
  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

}  // namespace
char AddressSanitizer::ID = 0;
INITIALIZE_PASS_BEGIN(
    AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(
    AddressSanitizer, "asan",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
    false)
FunctionPass *llvm::createAddressSanitizerFunctionPass() {
  return new AddressSanitizer();
}

char AddressSanitizerModule::ID = 0;
INITIALIZE_PASS(
    AddressSanitizerModule, "asan-module",
    "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
    "ModulePass",
    false, false)
ModulePass *llvm::createAddressSanitizerModulePass() {
  return new AddressSanitizerModule();
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
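// E.g. a 64-bit access has TypeSize == 64, so Res == countTrailingZeros(8)
// == 3, selecting the "*load8"/"*store8" flavor of the callbacks created in
// initializeCallbacks below.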
/// \brief Create a constant for Str so that we can pass it to the run-time lib.
static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
                                                    bool AllowMerging) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  // We use private linkage for module-local strings. If they can be merged
  // with another one, we set the unnamed_addr attribute.
  GlobalVariable *GV =
      new GlobalVariable(M, StrConst->getType(), true,
                         GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
  if (AllowMerging) GV->setUnnamedAddr(true);
  GV->setAlignment(1);  // Strings may not be merged w/o setting align 1.
  return GV;
}
/// \brief Create a global describing a source location.
static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
                                                       LocationMetadata MD) {
  Constant *LocData[] = {
      createPrivateGlobalForString(M, MD.Filename, true),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
      ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
  };
  auto LocStruct = ConstantStruct::getAnon(LocData);
  auto GV = new GlobalVariable(M, LocStruct->getType(), true,
                               GlobalValue::PrivateLinkage, LocStruct,
                               kAsanGenPrefix);
  GV->setUnnamedAddr(true);
  return GV;
}
static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
  return G->getName().find(kAsanGenPrefix) == 0 ||
         G->getName().find(kSanCovGenPrefix) == 0;
}
Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
  else
    return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
}
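// Sketch of the IR this produces for the default mapping:
//   %0 = lshr i64 %addr, 3
//   %shadow_addr = add i64 %0, <Mapping.Offset>  ; 'or' when OrShadowOffset
// One shadow byte covers Granularity == 1 << Mapping.Scale (8 by default)
// application bytes.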
// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall3(
        isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall3(
        AsanMemset,
        IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
  }
  MI->eraseFromParent();
}
/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(AllocaInst &AI) const {
  return (AI.getAllocatedType()->isSized() &&
          // alloca() may be called with 0 size, ignore it.
          getAllocaSizeInBytes(&AI) > 0 &&
          // We are only interested in allocas not promotable to registers.
          // Promotable allocas are common under -O0.
          (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)));
}
/// If I is an interesting memory access, return the PointerOperand
/// and set IsWrite/Alignment. Otherwise return nullptr.
Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                   bool *IsWrite,
                                                   uint64_t *TypeSize,
                                                   unsigned *Alignment) const {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  Value *PtrOperand = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL->getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL->getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL->getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize =
        DL->getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  }

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (ClSkipPromotableAllocas)
    if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
      return isInterestingAlloca(*AI) ? AI : nullptr;

  return PtrOperand;
}
static bool isPointerOperand(Value *V) {
  return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
}

// This is a rough heuristic; it may cause both false positives and
// false negatives. The proper implementation requires cooperation with
// the frontend.
static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
    if (!Cmp->isRelational()) return false;
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (BO->getOpcode() != Instruction::Sub) return false;
  } else {
    return false;
  }
  if (!isPointerOperand(I->getOperand(0)) ||
      !isPointerOperand(I->getOperand(1)))
    return false;
  return true;
}
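// Example of what this catches: 'p < q' or 'q - p' where p and q point into
// different objects is undefined behavior in C/C++; the __sanitizer_ptr_cmp
// and __sanitizer_ptr_sub callbacks inserted below let the run-time flag
// such pairs.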
bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
  // If a global variable does not have dynamic initialization we don't
  // have to instrument it. However, if a global does not have an initializer
  // at all, we assume it has a dynamic initializer (in another TU).
  return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
}

void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
    Instruction *I) {
  IRBuilder<> IRB(I);
  Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
  for (int i = 0; i < 2; i++) {
    if (Param[i]->getType()->isPointerTy())
      Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
  }
  IRB.CreateCall2(F, Param[0], Param[1]);
}
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     Instruction *I, bool UseCalls) {
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment);
  assert(Addr);

  if (ClOpt && ClOptGlobals) {
    // If initialization order checking is disabled, a simple access to a
    // dynamically initialized global is always valid.
    GlobalVariable *G =
        dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, nullptr));
    if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToGlobalVar++;
      return;
    }
  }

  if (ClOpt && ClOptStack) {
    // A direct inbounds access to a stack variable is always valid.
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, nullptr)) &&
        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
      NumOptimizedAccessesToStackVar++;
      return;
    }
  }

  if (IsWrite)
    NumInstrumentedWrites++;
  else
    NumInstrumentedReads++;

  unsigned Granularity = 1 << Mapping.Scale;
  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
  // if the data is properly aligned.
  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
       TypeSize == 128) &&
      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
    return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
  // Instrument unusual size or unusual alignment.
  // We cannot do it with a single check, so we do a 1-byte check for the first
  // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
  // to report the actual access size.
  IRBuilder<> IRB(I);
  Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (UseCalls) {
    IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
  } else {
    Value *LastByte = IRB.CreateIntToPtr(
        IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
        Addr->getType());
    instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
    instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
  }
}
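// E.g. an x86 long double load (TypeSize == 80) takes the "unusual size"
// path above: a 1-byte check covers the first byte and another covers the
// last byte, and the real size is passed along so __asan_report_*_n can
// describe the access precisely.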
// Validate the result of Module::getOrInsertFunction called for an interface
// function of AddressSanitizer. If the instrumented module defines a function
// with the same name, their prototypes must match, otherwise
// getOrInsertFunction returns a bitcast.
static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
  FuncOrBitcast->dump();
  report_fatal_error(
      "trying to redefine an AddressSanitizer "
      "interface function");
}
Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
                                                 Value *Addr, bool IsWrite,
                                                 size_t AccessSizeIndex,
                                                 Value *SizeArgument) {
  IRBuilder<> IRB(InsertBefore);
  CallInst *Call =
      SizeArgument
          ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
          : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);

  // We don't do Call->setDoesNotReturn() because the BB already has
  // UnreachableInst at the end.
  // This EmptyAsm is required to avoid callback merge.
  IRB.CreateCall(EmptyAsm);
  return Call;
}
Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                                           Value *ShadowValue,
                                           uint32_t TypeSize) {
  size_t Granularity = 1 << Mapping.Scale;
  // Addr & (Granularity - 1)
  Value *LastAccessedByte =
      IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
  // (Addr & (Granularity - 1)) + size - 1
  if (TypeSize / 8 > 1)
    LastAccessedByte = IRB.CreateAdd(
        LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
  // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
  LastAccessedByte =
      IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
  // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
  return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
}
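// Worked example (Scale == 3): a shadow value k with 1 <= k <= 7 means only
// the first k bytes of the 8-byte granule are addressable. For a 2-byte
// access at Addr, the slow path computes (Addr & 7) + 2 - 1 and reports an
// error iff that offset is >= k.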
void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                         Instruction *InsertBefore, Value *Addr,
                                         uint32_t TypeSize, bool IsWrite,
                                         Value *SizeArgument, bool UseCalls) {
  IRBuilder<> IRB(InsertBefore);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);

  if (UseCalls) {
    IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                   AddrLong);
    return;
  }

  Type *ShadowTy =
      IntegerType::get(*C, std::max(8U, TypeSize >> Mapping.Scale));
  Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
  Value *ShadowPtr = memToShadow(AddrLong, IRB);
  Value *CmpVal = Constant::getNullValue(ShadowTy);
  Value *ShadowValue =
      IRB.CreateLoad(IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));

  Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
  size_t Granularity = 1 << Mapping.Scale;
  TerminatorInst *CrashTerm = nullptr;

  if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
    // We use branch weights for the slow path check, to indicate that the slow
    // path is rarely taken. This seems to be the case for SPEC benchmarks.
    TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
        Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
    assert(cast<BranchInst>(CheckTerm)->isUnconditional());
    BasicBlock *NextBB = CheckTerm->getSuccessor(0);
    IRB.SetInsertPoint(CheckTerm);
    Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
    BasicBlock *CrashBlock =
        BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
    CrashTerm = new UnreachableInst(*C, CrashBlock);
    BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
    ReplaceInstWithInst(CheckTerm, NewTerm);
  } else {
    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
  }

  Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
                                         AccessSizeIndex, SizeArgument);
  Crash->setDebugLoc(OrigIns->getDebugLoc());
}
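// Overall shape of the inline fast path generated above (sketch):
//   %shadow = load i8, i8* MemToShadow(Addr)
//   if (%shadow != 0) {
//     if (slow-path check on the low address bits fails)
//       ReportError(Addr);  // __asan_report_{load,store}N, noreturn
//   }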
void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
                                                  GlobalValue *ModuleName) {
  // Set up the arguments to our poison/unpoison functions.
  IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt());

  // Add a call to poison all external globals before the given function starts.
  Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
  IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);

  // Add calls to unpoison all globals before each return instruction.
  for (auto &BB : GlobalInit.getBasicBlockList())
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      CallInst::Create(AsanUnpoisonGlobals, "", RI);
}
void AddressSanitizerModule::createInitializerPoisonCalls(
    Module &M, GlobalValue *ModuleName) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (!GV) return;

  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
  for (Use &OP : CA->operands()) {
    if (isa<ConstantAggregateZero>(OP)) continue;
    ConstantStruct *CS = cast<ConstantStruct>(OP);

    // Must have a function or null ptr.
    if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
      if (F->getName() == kAsanModuleCtorName) continue;
      ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
      // Don't instrument CTORs that will run before asan.module_ctor.
      if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
      poisonOneInitializer(*F, ModuleName);
    }
  }
}
bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
  Type *Ty = cast<PointerType>(G->getType())->getElementType();
  DEBUG(dbgs() << "GLOBAL: " << *G << "\n");

  if (GlobalsMD.get(G).IsBlacklisted) return false;
  if (!Ty->isSized()) return false;
  if (!G->hasInitializer()) return false;
  if (GlobalWasGeneratedByAsan(G)) return false;  // Our own global.
  // Touch only those globals that will not be defined in other modules.
  // Don't handle ODR linkage types and COMDATs since other modules may be built
  // without ASan.
  if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
      G->getLinkage() != GlobalVariable::PrivateLinkage &&
      G->getLinkage() != GlobalVariable::InternalLinkage)
    return false;
  if (G->hasComdat()) return false;
  // Two problems with thread-locals:
  //   - The address of the main thread's copy can't be computed at link-time.
  //   - Need to poison all copies, not just the main thread's one.
  if (G->isThreadLocal()) return false;
  // For now, just ignore this Global if the alignment is large.
  if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;

  if (G->hasSection()) {
    StringRef Section(G->getSection());

    if (TargetTriple.isOSBinFormatMachO()) {
      StringRef ParsedSegment, ParsedSection;
      unsigned TAA = 0, StubSize = 0;
      bool TAAParsed;
      std::string ErrorCode = MCSectionMachO::ParseSectionSpecifier(
          Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize);
      if (!ErrorCode.empty()) {
        report_fatal_error("Invalid section specifier '" + ParsedSection +
                           "': " + ErrorCode + ".");
      }

      // Ignore the globals from the __OBJC section. The ObjC runtime assumes
      // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
      // them.
      if (ParsedSegment == "__OBJC" ||
          (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
        DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
        return false;
      }
      // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
      // Constant CFString instances are compiled in the following way:
      //  -- the string buffer is emitted into
      //     __TEXT,__cstring,cstring_literals
      //  -- the constant NSConstantString structure referencing that buffer
      //     is placed into __DATA,__cfstring
      // Therefore there's no point in placing redzones into __DATA,__cfstring.
      // Moreover, it causes the linker to crash on OS X 10.7
      if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
        DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
        return false;
      }
      // The linker merges the contents of cstring_literals and removes the
      // trailing zeroes.
      if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
        DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
        return false;
      }
    }

    // Callbacks put into the CRT initializer/terminator sections
    // should not be instrumented.
    // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
    // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
    if (Section.startswith(".CRT")) {
      DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
      return false;
    }

    // Globals from llvm.metadata aren't emitted, do not instrument them.
    if (Section == "llvm.metadata") return false;
  }

  return true;
}
void AddressSanitizerModule::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Declare our poisoning and unpoisoning functions.
  AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr));
  AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr));
  AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
  // Declare functions that register/unregister globals.
  AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
  AsanUnregisterGlobals = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanUnregisterGlobalsName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, nullptr));
  AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
}
// This function replaces all global variables with new variables that have
// trailing redzones. It also creates a function that poisons
// redzones and inserts this function into llvm.global_ctors.
bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
  GlobalsMD.init(M);

  SmallVector<GlobalVariable *, 16> GlobalsToChange;

  for (auto &G : M.globals()) {
    if (ShouldInstrumentGlobal(&G)) GlobalsToChange.push_back(&G);
  }

  size_t n = GlobalsToChange.size();
  if (n == 0) return false;

  // A global is described by a structure
  //   size_t beg;
  //   size_t size;
  //   size_t size_with_redzone;
  //   const char *name;
  //   const char *module_name;
  //   size_t has_dynamic_init;
  //   void *source_location;
  // We initialize an array of such structures and pass it to a run-time call.
  StructType *GlobalStructTy =
      StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
                      IntptrTy, IntptrTy, nullptr);
  SmallVector<Constant *, 16> Initializers(n);

  bool HasDynamicallyInitializedGlobals = false;

  // We shouldn't merge same module names, as this string serves as unique
  // module ID in runtime.
  GlobalVariable *ModuleName = createPrivateGlobalForString(
      M, M.getModuleIdentifier(), /*AllowMerging*/ false);

  for (size_t i = 0; i < n; i++) {
    static const uint64_t kMaxGlobalRedzone = 1 << 18;
    GlobalVariable *G = GlobalsToChange[i];

    auto MD = GlobalsMD.get(G);
    // Create string holding the global name (use global name from metadata
    // if it's available, otherwise just write the name of global variable).
    GlobalVariable *Name = createPrivateGlobalForString(
        M, MD.Name.empty() ? G->getName() : MD.Name,
        /*AllowMerging*/ true);

    PointerType *PtrTy = cast<PointerType>(G->getType());
    Type *Ty = PtrTy->getElementType();
    uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
    uint64_t MinRZ = MinRedzoneSizeForGlobal();
    // MinRZ <= RZ <= kMaxGlobalRedzone
    // and trying to make RZ to be ~ 1/4 of SizeInBytes.
    uint64_t RZ = std::max(
        MinRZ, std::min(kMaxGlobalRedzone, (SizeInBytes / MinRZ / 4) * MinRZ));
    uint64_t RightRedzoneSize = RZ;
    // Round up to MinRZ.
    if (SizeInBytes % MinRZ) RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
    assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
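    // Worked example: with MinRZ == 32 and SizeInBytes == 100,
    // (100 / 32 / 4) * 32 == 0, so RZ == MinRZ == 32; the 100 % 32 == 4
    // leftover then rounds RightRedzoneSize up to 60, making 100 + 60 a
    // multiple of 32.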
    Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);

    StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr);
    Constant *NewInitializer =
        ConstantStruct::get(NewTy, G->getInitializer(),
                            Constant::getNullValue(RightRedZoneTy), nullptr);

    // Create a new global variable with enough space for a redzone.
    GlobalValue::LinkageTypes Linkage = G->getLinkage();
    if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
      Linkage = GlobalValue::InternalLinkage;
    GlobalVariable *NewGlobal =
        new GlobalVariable(M, NewTy, G->isConstant(), Linkage, NewInitializer,
                           "", G, G->getThreadLocalMode());
    NewGlobal->copyAttributesFrom(G);
    NewGlobal->setAlignment(MinRZ);

    Value *Indices2[2];
    Indices2[0] = IRB.getInt32(0);
    Indices2[1] = IRB.getInt32(0);

    G->replaceAllUsesWith(
        ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true));
    NewGlobal->takeName(G);
    G->eraseFromParent();

    Constant *SourceLoc;
    if (!MD.SourceLoc.empty()) {
      auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
      SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
    } else {
      SourceLoc = ConstantInt::get(IntptrTy, 0);
    }

    Initializers[i] = ConstantStruct::get(
        GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
        ConstantInt::get(IntptrTy, SizeInBytes),
        ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
        ConstantExpr::getPointerCast(Name, IntptrTy),
        ConstantExpr::getPointerCast(ModuleName, IntptrTy),
        ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr);

    if (ClInitializers && MD.IsDynInit) HasDynamicallyInitializedGlobals = true;

    DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
  }

  ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
  GlobalVariable *AllGlobals = new GlobalVariable(
      M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
      ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");

  // Create calls for poisoning before initializers run and unpoisoning after.
  if (HasDynamicallyInitializedGlobals)
    createInitializerPoisonCalls(M, ModuleName);
  IRB.CreateCall2(AsanRegisterGlobals,
                  IRB.CreatePointerCast(AllGlobals, IntptrTy),
                  ConstantInt::get(IntptrTy, n));

  // We also need to unregister globals at the end, e.g. when a shared library
  // gets closed.
  Function *AsanDtorFunction =
      Function::Create(FunctionType::get(Type::getVoidTy(*C), false),
                       GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
  BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
  IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
  IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
                       IRB.CreatePointerCast(AllGlobals, IntptrTy),
                       ConstantInt::get(IntptrTy, n));
  appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);

  DEBUG(dbgs() << M);
  return true;
}
bool AddressSanitizerModule::runOnModule(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP) return false;
  DL = &DLP->getDataLayout();
  C = &(M.getContext());
  int LongSize = DL->getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);
  TargetTriple = Triple(M.getTargetTriple());
  Mapping = getShadowMapping(TargetTriple, LongSize);
  initializeCallbacks(M);

  bool Changed = false;

  Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
  assert(CtorFunc);
  IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());

  if (ClGlobals) Changed |= InstrumentGlobals(IRB, M);

  return Changed;
}
void AddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  // Create __asan_report* callbacks.
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      // IsWrite and TypeSize are encoded in the function name.
      std::string Suffix =
          (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
      AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
          checkInterfaceFunction(
              M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
                                    IRB.getVoidTy(), IntptrTy, nullptr));
      AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          checkInterfaceFunction(
              M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
                                    IRB.getVoidTy(), IntptrTy, nullptr));
    }
  }
  AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));

  AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
      M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
                            IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
      M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
                            IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));

  AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
  AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
  AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
      ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr));

  AsanHandleNoReturnFunc = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr));

  AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
      kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  // We insert an empty inline asm after __asan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}
bool AddressSanitizer::doInitialization(Module &M) {
  // Initialize the private fields. No one has accessed them before.
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP) report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  GlobalsMD.init(M);

  C = &(M.getContext());
  LongSize = DL->getPointerSizeInBits();
  IntptrTy = Type::getIntNTy(*C, LongSize);
  TargetTriple = Triple(M.getTargetTriple());

  AsanCtorFunction =
      Function::Create(FunctionType::get(Type::getVoidTy(*C), false),
                       GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
  BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
  // call __asan_init in the module ctor.
  IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
  AsanInitFunction = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), nullptr));
  AsanInitFunction->setLinkage(Function::ExternalLinkage);
  IRB.CreateCall(AsanInitFunction);

  Mapping = getShadowMapping(TargetTriple, LongSize);

  appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
  return true;
}
bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
  // For each NSObject descendant having a +load method, this method is invoked
  // by the ObjC runtime before any of the static constructors is called.
  // Therefore we need to instrument such methods with a call to __asan_init
  // at the beginning in order to initialize our runtime before any access to
  // the shadow memory.
  // We cannot just ignore these methods, because they may call other
  // instrumented functions.
  if (F.getName().find(" load]") != std::string::npos) {
    IRBuilder<> IRB(F.begin()->begin());
    IRB.CreateCall(AsanInitFunction);
    return true;
  }
  return false;
}
1446 bool AddressSanitizer::runOnFunction(Function &F) {
1447 if (&F == AsanCtorFunction) return false;
1448 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
1449 DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
1450 initializeCallbacks(*F.getParent());
1452 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1454 // If needed, insert __asan_init before checking for SanitizeAddress attr.
1455 maybeInsertAsanInitAtFunctionEntry(F);
1457 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return false;
1459 if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false;
1461 // We want to instrument every address only once per basic block (unless there
1462 // are calls between uses).
1463 SmallSet<Value *, 16> TempsToInstrument;
1464 SmallVector<Instruction *, 16> ToInstrument;
1465 SmallVector<Instruction *, 8> NoReturnCalls;
1466 SmallVector<BasicBlock *, 16> AllBlocks;
1467 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
1473 // Fill the set of memory operations to instrument.
1474 for (auto &BB : F) {
1475 AllBlocks.push_back(&BB);
1476 TempsToInstrument.clear();
1477 int NumInsnsPerBB = 0;
1478 for (auto &Inst : BB) {
1479 if (LooksLikeCodeInBug11395(&Inst)) return false;
1480 if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
1482 if (ClOpt && ClOptSameTemp) {
1483 if (!TempsToInstrument.insert(Addr).second)
1484 continue; // We've seen this temp in the current BB.
1486 } else if (ClInvalidPointerPairs &&
1487 isInterestingPointerComparisonOrSubtraction(&Inst)) {
1488 PointerComparisonsOrSubtracts.push_back(&Inst);
1490 } else if (isa<MemIntrinsic>(Inst)) {
1493 if (isa<AllocaInst>(Inst)) NumAllocas++;
1496 // A call inside BB.
1497 TempsToInstrument.clear();
1498 if (CS.doesNotReturn()) NoReturnCalls.push_back(CS.getInstruction());
1502 ToInstrument.push_back(&Inst);
1504 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;

  bool UseCalls = false;
  if (ClInstrumentationWithCallsThreshold >= 0 &&
      ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
    UseCalls = true;

  const TargetLibraryInfo *TLI =
      &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext(),
                                     /*RoundToAlign=*/true);

  // Instrument.
  int NumInstrumented = 0;
  for (auto Inst : ToInstrument) {
    if (ClDebugMin < 0 || ClDebugMax < 0 ||
        (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
      if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
        instrumentMop(ObjSizeVis, Inst, UseCalls);
      else
        instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
    }
    NumInstrumented++;
  }

  FunctionStackPoisoner FSP(F, *this);
  bool ChangedStack = FSP.runOnFunction();

  // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
  // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
  for (auto CI : NoReturnCalls) {
    IRBuilder<> IRB(CI);
    IRB.CreateCall(AsanHandleNoReturnFunc);
  }

  for (auto Inst : PointerComparisonsOrSubtracts) {
    instrumentPointerComparisonOrSubtraction(Inst);
    NumInstrumented++;
  }

  bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();

  DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");

  return res;
}

// Workaround for bug 11395: we don't want to instrument stack in functions
// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
// FIXME: remove once bug 11395 is fixed.
bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
  if (LongSize != 32) return false;
  CallInst *CI = dyn_cast<CallInst>(I);
  if (!CI || !CI->isInlineAsm()) return false;
  if (CI->getNumArgOperands() <= 5) return false;
  // We have inline assembly with quite a few arguments.
  return true;
}

void FunctionStackPoisoner::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
    std::string Suffix = itostr(i);
    AsanStackMallocFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
        kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy, nullptr));
    AsanStackFreeFunc[i] = checkInterfaceFunction(
        M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
                              IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
  }
  AsanPoisonStackMemoryFunc = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, nullptr));
  AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(
      M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
                            IntptrTy, IntptrTy, nullptr));
}

void FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
                                           IRBuilder<> &IRB, Value *ShadowBase,
                                           bool DoPoison) {
  size_t n = ShadowBytes.size();
  size_t i = 0;
  // We need to (un)poison n bytes of stack shadow. Poison as many as we can
  // using 64-bit stores (if we are on a 64-bit arch), then poison the rest
  // with 32-bit stores, then with 16-bit stores, then with 8-bit stores.
  for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
       LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
    for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
      uint64_t Val = 0;
      for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
        if (ASan.DL->isLittleEndian())
          Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
        else
          Val = (Val << 8) | ShadowBytes[i + j];
      }
      if (!Val) continue;
      Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
      Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
      Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
      IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
    }
  }
}
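
// Illustration of the store splitting above (a sketch): on a 64-bit target
// with n == 13 shadow bytes, the loop emits one 8-byte store (i = 0), then
// one 4-byte store (i = 8), then one 1-byte store (i = 12); chunks whose
// shadow bytes are all zero are skipped entirely.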

// Fake stack allocator (asan_fake_stack.h) has 11 size classes,
// one for each power of 2 from kMinStackMallocSize (1 << 6)
// up to kMaxStackMallocSize (1 << 16).
static int StackMallocSizeClass(uint64_t LocalStackSize) {
  assert(LocalStackSize <= kMaxStackMallocSize);
  uint64_t MaxSize = kMinStackMallocSize;
  for (int i = 0;; i++, MaxSize *= 2)
    if (LocalStackSize <= MaxSize) return i;
  llvm_unreachable("impossible LocalStackSize");
}
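
// For the constants above: StackMallocSizeClass(64) == 0,
// StackMallocSizeClass(65) == 1, ..., StackMallocSizeClass(65536) == 10.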

// Set Size bytes starting from ShadowBase to kAsanStackAfterReturnMagic.
// We cannot use the memset intrinsic because it may end up calling the actual
// memset. Size is a multiple of 8.
// Currently this generates 8-byte stores on x86_64; it may be better to
// generate wider stores.
void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
    IRBuilder<> &IRB, Value *ShadowBase, int Size) {
  assert(!(Size % 8));
  assert(kAsanStackAfterReturnMagic == 0xf5);
  for (int i = 0; i < Size; i += 8) {
    Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
    IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL),
                    IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
  }
}
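
// E.g. for Size == 16 the loop above emits two 8-byte stores of
// 0xf5f5f5f5f5f5f5f5, at ShadowBase + 0 and ShadowBase + 8.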

static DebugLoc getFunctionEntryDebugLocation(Function &F) {
  for (const auto &Inst : F.getEntryBlock())
    if (!isa<AllocaInst>(Inst)) return Inst.getDebugLoc();
  return DebugLoc();
}

PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
                                          Value *ValueIfTrue,
                                          Instruction *ThenTerm,
                                          Value *ValueIfFalse) {
  PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
  BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
  PHI->addIncoming(ValueIfFalse, CondBlock);
  BasicBlock *ThenBlock = ThenTerm->getParent();
  PHI->addIncoming(ValueIfTrue, ThenBlock);
  return PHI;
}

Value *FunctionStackPoisoner::createAllocaForLayout(
    IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
  AllocaInst *Alloca;
  if (Dynamic) {
    Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
                              ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
                              "MyAlloca");
  } else {
    Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
                              nullptr, "MyAlloca");
    assert(Alloca->isStaticAlloca());
  }
  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
  Alloca->setAlignment(FrameAlignment);
  return IRB.CreatePointerCast(Alloca, IntptrTy);
}
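
// The two shapes produced above are roughly (a sketch):
//   dynamic: %MyAlloca = alloca i8, i64 <FrameSize>   ; run-time count
//   static:  %MyAlloca = alloca [<FrameSize> x i8]    ; constant size
// Either one is realigned to max(FrameAlignment, ClRealignStack) and cast to
// an integer for shadow arithmetic.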

void FunctionStackPoisoner::poisonStack() {
  assert(AllocaVec.size() > 0 || DynamicAllocaVec.size() > 0);

  if (ClInstrumentAllocas) {
    // Handle dynamic allocas.
    for (auto &AllocaCall : DynamicAllocaVec) {
      handleDynamicAllocaCall(AllocaCall);
      unpoisonDynamicAlloca(AllocaCall);
    }
  }

  if (AllocaVec.size() == 0) return;

  int StackMallocIdx = -1;
  DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);

  Instruction *InsBefore = AllocaVec[0];
  IRBuilder<> IRB(InsBefore);
  IRB.SetCurrentDebugLocation(EntryDebugLocation);

  SmallVector<ASanStackVariableDescription, 16> SVD;
  SVD.reserve(AllocaVec.size());
  for (AllocaInst *AI : AllocaVec) {
    ASanStackVariableDescription D = {AI->getName().data(),
                                      ASan.getAllocaSizeInBytes(AI),
                                      AI->getAlignment(), AI, 0};
    SVD.push_back(D);
  }
  // Minimal header size (left redzone) is 4 pointers,
  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
  size_t MinHeaderSize = ASan.LongSize / 2;
  ASanStackFrameLayout L;
  ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
  DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
  uint64_t LocalStackSize = L.FrameSize;
  bool DoStackMalloc =
      ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
  // Don't do dynamic alloca in presence of inline asm: too often it
  // makes assumptions on which registers are available.
  bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;

  Value *StaticAlloca =
      DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);

  Value *FakeStack;
  Value *LocalStackBase;

  if (DoStackMalloc) {
    // void *FakeStack = __asan_option_detect_stack_use_after_return
    //     ? __asan_stack_malloc_N(LocalStackSize)
    //     : nullptr;
    // void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
    Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal(
        kAsanOptionDetectUAR, IRB.getInt32Ty());
    Value *UARIsEnabled =
        IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
                         Constant::getNullValue(IRB.getInt32Ty()));
    Instruction *Term =
        SplitBlockAndInsertIfThen(UARIsEnabled, InsBefore, false);
    IRBuilder<> IRBIf(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    StackMallocIdx = StackMallocSizeClass(LocalStackSize);
    assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
    Value *FakeStackValue =
        IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
                         ConstantInt::get(IntptrTy, LocalStackSize));
    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    FakeStack = createPHI(IRB, UARIsEnabled, FakeStackValue, Term,
                          ConstantInt::get(IntptrTy, 0));

    Value *NoFakeStack =
        IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
    Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
    IRBIf.SetInsertPoint(Term);
    IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
    Value *AllocaValue =
        DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
    IRB.SetInsertPoint(InsBefore);
    IRB.SetCurrentDebugLocation(EntryDebugLocation);
    LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
  } else {
    // void *FakeStack = nullptr;
    // void *LocalStackBase = alloca(LocalStackSize);
    FakeStack = ConstantInt::get(IntptrTy, 0);
    LocalStackBase =
        DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
  }

  // Insert poison calls for lifetime intrinsics for alloca.
  bool HavePoisonedAllocas = false;
  for (const auto &APC : AllocaPoisonCallVec) {
    assert(APC.InsBefore);
    assert(APC.AI);
    IRBuilder<> IRB(APC.InsBefore);
    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
    HavePoisonedAllocas |= APC.DoPoison;
  }

  // Replace Alloca instructions with base+offset.
  for (const auto &Desc : SVD) {
    AllocaInst *AI = Desc.AI;
    Value *NewAllocaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
        AI->getType());
    replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB, /*Deref=*/true);
    AI->replaceAllUsesWith(NewAllocaPtr);
  }

  // The left-most redzone has enough space for at least 4 pointers.
  // Write the Magic value to redzone[0].
  Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
  IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                  BasePlus0);
  // Write the frame description constant to redzone[1].
  Value *BasePlus1 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
      IntptrPtrTy);
  GlobalVariable *StackDescriptionGlobal =
      createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
                                   /*AllowMerging*/ true);
  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
  IRB.CreateStore(Description, BasePlus1);
  // Write the PC to redzone[2].
  Value *BasePlus2 = IRB.CreateIntToPtr(
      IRB.CreateAdd(LocalStackBase,
                    ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
      IntptrPtrTy);
  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);

  // Poison the stack redzones at the entry.
  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
  poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);
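
  // At this point the frame header is populated; on a 64-bit target it is,
  // in effect (a sketch):
  //   LocalStackBase + 0:  0x41B58AB3 (kCurrentStackFrameMagic)
  //   LocalStackBase + 8:  pointer to the frame description string
  //   LocalStackBase + 16: PC of the function, for symbolized reports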

  // (Un)poison the stack before all ret instructions.
  for (auto Ret : RetVec) {
    IRBuilder<> IRBRet(Ret);
    // Mark the current frame as retired.
    IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                       BasePlus0);
    if (DoStackMalloc) {
      assert(StackMallocIdx >= 0);
      // if FakeStack != 0  // LocalStackBase == FakeStack
      //     // In use-after-return mode, poison the whole stack frame.
      //     if StackMallocIdx <= 4
      //         // For small sizes inline the whole thing:
      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
      //         **SavedFlagPtr(FakeStack) = 0
      //     else
      //         __asan_stack_free_N(FakeStack, LocalStackSize)
      // else
      //     <This is not a fake stack; unpoison the redzones>
      Value *Cmp =
          IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
      TerminatorInst *ThenTerm, *ElseTerm;
      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);

      IRBuilder<> IRBPoison(ThenTerm);
      if (StackMallocIdx <= 4) {
        int ClassSize = kMinStackMallocSize << StackMallocIdx;
        SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
                                           ClassSize >> Mapping.Scale);
        Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
            FakeStack,
            ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
        Value *SavedFlagPtr = IRBPoison.CreateLoad(
            IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
        IRBPoison.CreateStore(
            Constant::getNullValue(IRBPoison.getInt8Ty()),
            IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
      } else {
        // For larger frames call __asan_stack_free_*.
        IRBPoison.CreateCall2(AsanStackFreeFunc[StackMallocIdx], FakeStack,
                              ConstantInt::get(IntptrTy, LocalStackSize));
      }

      IRBuilder<> IRBElse(ElseTerm);
      poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
    } else if (HavePoisonedAllocas) {
      // If we poisoned some allocas via llvm.lifetime analysis,
      // unpoison the whole stack frame now.
      poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
    } else {
      poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
    }
  }

  // We are done. Remove the old unused alloca instructions.
  for (auto AI : AllocaVec) AI->eraseFromParent();
}

void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
                                         IRBuilder<> &IRB, bool DoPoison) {
  // For now just insert a call to the ASan runtime.
  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
  IRB.CreateCall2(
      DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
      AddrArg, SizeArg);
}

// Handling llvm.lifetime intrinsics for a given %alloca:
// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
//     could be poisoned by a previous llvm.lifetime.end instruction, as the
//     variable may go in and out of scope several times, e.g. in loops).
// (3) if we poisoned at least one %alloca in a function,
//     unpoison the whole stack frame at function exit.
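
// For illustration, the IR shape handled here looks roughly like:
//   %x = alloca [16 x i8]
//   %xp = bitcast [16 x i8]* %x to i8*
//   call void @llvm.lifetime.start(i64 16, i8* %xp) ; unpoison %x after this
//   ...
//   call void @llvm.lifetime.end(i64 16, i8* %xp)   ; poison %x after this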

AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
  if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
    // We're interested only in allocas we can handle.
    return ASan.isInterestingAlloca(*AI) ? AI : nullptr;
  // See if we've already calculated (or started to calculate) alloca for a
  // given value.
  AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
  if (I != AllocaForValue.end()) return I->second;
  // Store 0 while we're calculating alloca for value V to avoid
  // infinite recursion if the value references itself.
  AllocaForValue[V] = nullptr;
  AllocaInst *Res = nullptr;
  if (CastInst *CI = dyn_cast<CastInst>(V))
    Res = findAllocaForValue(CI->getOperand(0));
  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *IncValue = PN->getIncomingValue(i);
      // Allow self-referencing phi-nodes.
      if (IncValue == PN) continue;
      AllocaInst *IncValueAI = findAllocaForValue(IncValue);
      // AI for incoming values should exist and should all be equal.
      if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
        return nullptr;
      Res = IncValueAI;
    }
  }
  if (Res) AllocaForValue[V] = Res;
  return Res;
}
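
// E.g. given (a sketch)
//   %a = alloca i32
//   %c = bitcast i32* %a to i8*
//   %p = phi i8* [ %c, %entry ], [ %p, %loop ]
// findAllocaForValue(%p) resolves %p to %a: the cast is looked through and
// the self-referencing phi operand is ignored. Had the phi merged casts of
// two different allocas, the function would return nullptr.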

// Compute PartialRzMagic for dynamic alloca call. PartialRzMagic is
// constructed from two separate 32-bit numbers: PartialRzMagic = Val1 | Val2.
// (1) Val1 is responsible for forming the base value for PartialRzMagic,
//     containing only 00 for fully addressable and 0xcb for fully poisoned
//     bytes for each 8-byte chunk of user memory respectively.
// (2) Val2 forms the value for marking the first poisoned byte in shadow
//     memory with an appropriate value (0x01 - 0x07 or 0xcb if
//     Padding % 8 == 0).
//
// Shift = Padding & ~7; // the number of bits we need to shift to access the
//                       // first chunk in shadow memory that contains nonzero
//                       // bytes.
// Example:
// Padding = 21                       Padding = 16
// Shadow:  |00|00|05|cb|             Shadow:  |00|00|cb|cb|
//                 ^                                  ^
//                 |                                  |
// Shift = 21 & ~7 = 16               Shift = 16 & ~7 = 16
//
// Val1 = 0xcbcbcbcb << Shift;
// PartialBits = Padding & 7 ? Padding & 7 : 0xcb;
// Val2 = PartialBits << Shift;
// Result = Val1 | Val2;
Value *FunctionStackPoisoner::computePartialRzMagic(Value *PartialSize,
                                                    IRBuilder<> &IRB) {
  PartialSize = IRB.CreateIntCast(PartialSize, IRB.getInt32Ty(), false);
  Value *Shift = IRB.CreateAnd(PartialSize, IRB.getInt32(~7));
  unsigned Val1Int = kAsanAllocaPartialVal1;
  unsigned Val2Int = kAsanAllocaPartialVal2;
  if (!ASan.DL->isLittleEndian()) {
    Val1Int = sys::getSwappedBytes(Val1Int);
    Val2Int = sys::getSwappedBytes(Val2Int);
  }
  Value *Val1 = shiftAllocaMagic(IRB.getInt32(Val1Int), IRB, Shift);
  Value *PartialBits = IRB.CreateAnd(PartialSize, IRB.getInt32(7));
  // For BigEndian get 0x000000YZ -> 0xYZ000000.
  if (ASan.DL->isBigEndian())
    PartialBits = IRB.CreateShl(PartialBits, IRB.getInt32(24));
  Value *Val2 = IRB.getInt32(Val2Int);
  Value *Cond =
      IRB.CreateICmpNE(PartialBits, Constant::getNullValue(IRB.getInt32Ty()));
  Val2 = IRB.CreateSelect(Cond, shiftAllocaMagic(PartialBits, IRB, Shift),
                          shiftAllocaMagic(Val2, IRB, Shift));
  return IRB.CreateOr(Val1, Val2);
}
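
// Tracing the left-hand example from the comment above through this code
// (little-endian): PartialSize = 21 gives Shift = 16 and PartialBits = 5,
// so Val2 becomes 5 << 16, i.e. 0x05 in the third shadow byte, while Val1
// supplies the 0xcb bytes for the fully poisoned trailing chunks.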

void FunctionStackPoisoner::handleDynamicAllocaCall(
    DynamicAllocaCall &AllocaCall) {
  AllocaInst *AI = AllocaCall.AI;
  if (!doesDominateAllExits(AI)) {
    // We do not yet handle complex allocas.
    AllocaCall.Poison = false;
    return;
  }

  IRBuilder<> IRB(AI);

  PointerType *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
  const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
  const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;

  Value *Zero = Constant::getNullValue(IntptrTy);
  Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
  Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
  Value *NotAllocaRzMask = ConstantInt::get(IntptrTy, ~AllocaRedzoneMask);

  // Since we need to extend the alloca with additional memory to locate
  // redzones, and OldSize is the number of allocated blocks of ElementSize
  // size each, compute the allocated memory size in bytes as
  // OldSize * ElementSize.
  unsigned ElementSize = ASan.DL->getTypeAllocSize(AI->getAllocatedType());
  Value *OldSize = IRB.CreateMul(AI->getArraySize(),
                                 ConstantInt::get(IntptrTy, ElementSize));

  // PartialSize = OldSize % 32
  Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);

  // Misalign = kAllocaRzSize - PartialSize;
  Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);

  // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
  Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
  Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);

  // AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize
  // Align is added to locate the left redzone, PartialPadding for a possible
  // partial redzone and kAllocaRzSize for the right redzone respectively.
  Value *AdditionalChunkSize = IRB.CreateAdd(
      ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding);

  Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);

  // Insert new alloca with new NewSize and Align params.
  AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
  NewAlloca->setAlignment(Align);

  // NewAddress = Address + Align
  Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
                                    ConstantInt::get(IntptrTy, Align));

  Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());

  // LeftRzAddress = NewAddress - kAllocaRzSize
  Value *LeftRzAddress = IRB.CreateSub(NewAddress, AllocaRzSize);

  // Poisoning left redzone.
  AllocaCall.LeftRzAddr = ASan.memToShadow(LeftRzAddress, IRB);
  IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaLeftMagic),
                  IRB.CreateIntToPtr(AllocaCall.LeftRzAddr, Int32PtrTy));

  // PartialRzAligned = PartialRzAddr & ~AllocaRzMask
  Value *PartialRzAddr = IRB.CreateAdd(NewAddress, OldSize);
  Value *PartialRzAligned = IRB.CreateAnd(PartialRzAddr, NotAllocaRzMask);

  // Poisoning partial redzone.
  Value *PartialRzMagic = computePartialRzMagic(PartialSize, IRB);
  Value *PartialRzShadowAddr = ASan.memToShadow(PartialRzAligned, IRB);
  IRB.CreateStore(PartialRzMagic,
                  IRB.CreateIntToPtr(PartialRzShadowAddr, Int32PtrTy));

  // RightRzAddress
  //   = (PartialRzAddr + AllocaRzMask) & ~AllocaRzMask
  Value *RightRzAddress = IRB.CreateAnd(
      IRB.CreateAdd(PartialRzAddr, AllocaRzMask), NotAllocaRzMask);

  // Poisoning right redzone.
  AllocaCall.RightRzAddr = ASan.memToShadow(RightRzAddress, IRB);
  IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaRightMagic),
                  IRB.CreateIntToPtr(AllocaCall.RightRzAddr, Int32PtrTy));

  // Replace all uses of AddressReturnedByAlloca with NewAddress.
  AI->replaceAllUsesWith(NewAddressPtr);

  // We are done. Erase the old alloca and store the left, partial and right
  // redzone shadow addresses for future unpoisoning.
  AI->eraseFromParent();
  NumInstrumentedDynamicAllocas++;
}
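
// Resulting layout of the rewritten alloca (a sketch):
//
//   [... left rz ...][user memory][partial rz][right rz]
//   ^NewAlloca       ^NewAddress = NewAlloca + Align
//
// The user pointer is advanced by Align bytes so that a left redzone fits in
// front of it; PartialPadding rounds the user area up to a kAllocaRzSize
// boundary, and kAllocaRzSize more bytes form the right redzone.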

// isSafeAccess returns true if Addr is always inbounds with respect to its
// base object. For example, it is a field access or an array access with
// a constant inbounds index.
bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
                                    Value *Addr, uint64_t TypeSize) const {
  SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
  if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
  int64_t Size = SizeOffset.first.getSExtValue();
  int64_t Offset = SizeOffset.second.getSExtValue();
  // Three checks are required to ensure safety:
  // . Offset >= 0  (since the offset is given from the base ptr)
  // . Size >= Offset  (unsigned)
  // . Size - Offset >= NeededSize  (unsigned)
  return Offset >= 0 && Size >= Offset &&
         uint64_t(Size - Offset) >= TypeSize / 8;
}
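
// E.g. for "struct { char a[16]; } s;" a 1-byte access to s.a[3] has
// Size == 16, Offset == 3 and TypeSize / 8 == 1, so isSafeAccess returns
// true and the access needs no instrumentation.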