diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 2ee0f9d4a23..27c97f25044 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -23,6 +23,7 @@
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/Triple.h"
 #include "llvm/DIBuilder.h"
@@ -32,6 +33,7 @@
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Type.h"
 #include "llvm/InstVisitor.h"
@@ -40,8 +42,8 @@
 #include "llvm/Support/DataTypes.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/Endian.h"
-#include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/system_error.h"
+#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Cloning.h"
 #include "llvm/Transforms/Utils/Local.h"
@@ -55,9 +57,11 @@ using namespace llvm;
 static const uint64_t kDefaultShadowScale = 3;
 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
-static const uint64_t kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
+static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
+static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
 
 static const size_t kMinStackMallocSize = 1 << 6; // 64B
 static const size_t kMaxStackMallocSize = 1 << 16; // 64K
@@ -76,9 +80,10 @@ static const char *const kAsanUnregisterGlobalsName =
 static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
 static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
 static const char *const kAsanInitName = "__asan_init_v3";
+static const char *const kAsanCovName = "__sanitizer_cov";
+static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
+static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
 static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
-static const char *const kAsanMappingOffsetName = "__asan_mapping_offset";
-static const char *const kAsanMappingScaleName = "__asan_mapping_scale";
 static const int kMaxAsanStackMallocSizeClass = 10;
 static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
 static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
@@ -88,11 +93,9 @@ static const char *const kAsanPoisonStackMemoryName =
 static const char *const kAsanUnpoisonStackMemoryName =
     "__asan_unpoison_stack_memory";
 
-// These constants must match the definitions in the run-time library.
-static const int kAsanStackLeftRedzoneMagic = 0xf1;
-static const int kAsanStackMidRedzoneMagic = 0xf2;
-static const int kAsanStackRightRedzoneMagic = 0xf3;
-static const int kAsanStackPartialRedzoneMagic = 0xf4;
+static const char *const kAsanOptionDetectUAR =
+    "__asan_option_detect_stack_use_after_return";
+
 #ifndef NDEBUG
 static const int kAsanStackAfterReturnMagic = 0xf5;
 #endif
@@ -130,12 +133,19 @@ static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
 // This flag may need to be replaced with -f[no]asan-globals.
 static cl::opt<bool> ClGlobals("asan-globals",
        cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
+static cl::opt<int> ClCoverage("asan-coverage",
+       cl::desc("ASan coverage. 0: none, 1: entry block, 2: all blocks"),
+       cl::Hidden, cl::init(false));
 static cl::opt<bool> ClInitializers("asan-initialization-order",
        cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false));
 static cl::opt<bool> ClMemIntrin("asan-memintrin",
        cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true));
-static cl::opt<bool> ClRealignStack("asan-realign-stack",
-       cl::desc("Realign stack to 32"), cl::Hidden, cl::init(true));
+static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
+       cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
+       cl::Hidden, cl::init(false));
+static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
+       cl::desc("Realign stack to the value of this flag (power of two)"),
+       cl::Hidden, cl::init(32));
 static cl::opt<std::string> ClBlacklistFile("asan-blacklist",
        cl::desc("File containing the list of objects to ignore "
                 "during instrumentation"), cl::Hidden);
@@ -158,11 +168,6 @@ static cl::opt<bool> ClKeepUninstrumented("asan-keep-uninstrumented-functions",
 // Shadow = (Mem >> scale) + (1 << offset_log)
 static cl::opt<int> ClMappingScale("asan-mapping-scale",
        cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
-static cl::opt<int> ClMappingOffsetLog("asan-mapping-offset-log",
-       cl::desc("offset of asan shadow mapping"), cl::Hidden, cl::init(-1));
-static cl::opt<bool> ClShort64BitOffset("asan-short-64bit-mapping-offset",
-       cl::desc("Use short immediate constant as the mapping offset for 64bit"),
-       cl::Hidden, cl::init(true));
 
 // Optimization flags. Not user visible, used mostly for testing
 // and benchmarking the tool.
@@ -190,6 +195,13 @@ static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
        cl::Hidden, cl::init(-1));
 
+STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
+STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
+STATISTIC(NumOptimizedAccessesToGlobalArray,
+          "Number of optimized accesses to global arrays");
+STATISTIC(NumOptimizedAccessesToGlobalVar,
+          "Number of optimized accesses to global vars");
+
 namespace {
 /// A set of dynamically initialized globals extracted from metadata.
 class SetOfDynamicallyInitializedGlobals {
@@ -224,11 +236,12 @@ struct ShadowMapping {
   bool OrShadowOffset;
 };
 
-static ShadowMapping getShadowMapping(const Module &M, int LongSize,
-                                      bool ZeroBaseShadow) {
+static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
   llvm::Triple TargetTriple(M.getTargetTriple());
   bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
-  bool IsMacOSX = TargetTriple.getOS() == llvm::Triple::MacOSX;
+  // bool IsMacOSX = TargetTriple.getOS() == llvm::Triple::MacOSX;
+  bool IsFreeBSD = TargetTriple.getOS() == llvm::Triple::FreeBSD;
+  bool IsLinux = TargetTriple.getOS() == llvm::Triple::Linux;
   bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
                  TargetTriple.getArch() == llvm::Triple::ppc64le;
   bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
@@ -237,22 +250,24 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize,
 
   ShadowMapping Mapping;
 
-  // OR-ing shadow offset if more efficient (at least on x86),
-  // but on ppc64 we have to use add since the shadow offset is not neccesary
-  // 1/8-th of the address space.
-  Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset;
-
-  Mapping.Offset = (IsAndroid || ZeroBaseShadow) ? 0 :
-      (LongSize == 32 ?
-        (IsMIPS32 ? kMIPS32_ShadowOffset32 : kDefaultShadowOffset32) :
-       IsPPC64 ? kPPC64_ShadowOffset64 : kDefaultShadowOffset64);
-  if (!ZeroBaseShadow && ClShort64BitOffset && IsX86_64 && !IsMacOSX) {
-    assert(LongSize == 64);
-    Mapping.Offset = kDefaultShort64bitShadowOffset;
-  }
-  if (!ZeroBaseShadow && ClMappingOffsetLog >= 0) {
-    // Zero offset log is the special case.
-    Mapping.Offset = (ClMappingOffsetLog == 0) ? 0 : 1ULL << ClMappingOffsetLog;
+  if (LongSize == 32) {
+    if (IsAndroid)
+      Mapping.Offset = 0;
+    else if (IsMIPS32)
+      Mapping.Offset = kMIPS32_ShadowOffset32;
+    else if (IsFreeBSD)
+      Mapping.Offset = kFreeBSD_ShadowOffset32;
+    else
+      Mapping.Offset = kDefaultShadowOffset32;
+  } else {  // LongSize == 64
+    if (IsPPC64)
+      Mapping.Offset = kPPC64_ShadowOffset64;
+    else if (IsFreeBSD)
+      Mapping.Offset = kFreeBSD_ShadowOffset64;
+    else if (IsLinux && IsX86_64)
+      Mapping.Offset = kSmallX86_64ShadowOffset;
+    else
+      Mapping.Offset = kDefaultShadowOffset64;
   }
 
   Mapping.Scale = kDefaultShadowScale;
@@ -260,6 +275,11 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize,
     Mapping.Scale = ClMappingScale;
   }
 
+  // OR-ing shadow offset is more efficient (at least on x86) if the offset
+  // is a power of two, but on ppc64 we have to use add since the shadow
+  // offset is not necessarily 1/8-th of the address space.
+  Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1));
+
   return Mapping;
 }
 
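For concreteness, the mapping this function selects can be evaluated by hand. A small standalone sketch (the address value is arbitrary; the constants are the ones chosen above for Linux/x86_64 with the default scale):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kOffset = 0x7fff8000;  // kSmallX86_64ShadowOffset
    const uint64_t kScale = 3;            // kDefaultShadowScale: 8 app bytes
                                          // share 1 shadow byte
    uint64_t Mem = 0x7fffffffd8d0;        // some stack address
    uint64_t Shadow = (Mem >> kScale) + kOffset;
    // 0x7fff8000 is not a power of two, so OrShadowOffset stays false and an
    // add is emitted; with kDefaultShadowOffset64 (1ULL << 44) an OR would do.
    printf("shadow(%llx) = %llx\n", (unsigned long long)Mem,
           (unsigned long long)Shadow);  // prints: shadow(...) = 10007fff7b1a
  }
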
@@ -274,19 +294,18 @@ struct AddressSanitizer : public FunctionPass {
   AddressSanitizer(bool CheckInitOrder = true,
                    bool CheckUseAfterReturn = false,
                    bool CheckLifetime = false,
-                   StringRef BlacklistFile = StringRef(),
-                   bool ZeroBaseShadow = false)
+                   StringRef BlacklistFile = StringRef())
       : FunctionPass(ID),
         CheckInitOrder(CheckInitOrder || ClInitializers),
         CheckUseAfterReturn(CheckUseAfterReturn || ClUseAfterReturn),
         CheckLifetime(CheckLifetime || ClCheckLifetime),
         BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
-                                            : BlacklistFile),
-        ZeroBaseShadow(ZeroBaseShadow) {}
+                                            : BlacklistFile) {}
   virtual const char *getPassName() const {
     return "AddressSanitizerFunctionPass";
   }
   void instrumentMop(Instruction *I);
+  void instrumentPointerComparisonOrSubtraction(Instruction *I);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                          Value *Addr, uint32_t TypeSize, bool IsWrite,
                          Value *SizeArgument);
@@ -302,7 +321,6 @@ struct AddressSanitizer : public FunctionPass {
   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
   bool runOnFunction(Function &F);
   bool maybeInsertAsanInitAtFunctionEntry(Function &F);
-  void emitShadowMapping(Module &M, IRBuilder<> &IRB) const;
   virtual bool doInitialization(Module &M);
   static char ID;  // Pass identification, replacement for typeid
@@ -312,21 +330,25 @@ struct AddressSanitizer : public FunctionPass {
   bool ShouldInstrumentGlobal(GlobalVariable *G);
   bool LooksLikeCodeInBug11395(Instruction *I);
   void FindDynamicInitializers(Module &M);
+  bool GlobalIsLinkerInitialized(GlobalVariable *G);
+  bool InjectCoverage(Function &F, const ArrayRef<BasicBlock *> AllBlocks);
+  void InjectCoverageAtBlock(Function &F, BasicBlock &BB);
 
   bool CheckInitOrder;
   bool CheckUseAfterReturn;
   bool CheckLifetime;
   SmallString<64> BlacklistFile;
-  bool ZeroBaseShadow;
 
   LLVMContext *C;
-  DataLayout *TD;
+  const DataLayout *DL;
   int LongSize;
   Type *IntptrTy;
   ShadowMapping Mapping;
   Function *AsanCtorFunction;
   Function *AsanInitFunction;
   Function *AsanHandleNoReturnFunc;
+  Function *AsanCovFunction;
+  Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
   OwningPtr<SpecialCaseList> BL;
   // This array is indexed by AccessIsWrite and log2(AccessSize).
   Function *AsanErrorCallback[2][kNumberOfAccessSizes];
@@ -341,13 +363,11 @@ struct AddressSanitizer : public FunctionPass {
 class AddressSanitizerModule : public ModulePass {
  public:
   AddressSanitizerModule(bool CheckInitOrder = true,
-                         StringRef BlacklistFile = StringRef(),
-                         bool ZeroBaseShadow = false)
+                         StringRef BlacklistFile = StringRef())
       : ModulePass(ID),
         CheckInitOrder(CheckInitOrder || ClInitializers),
         BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
-                                            : BlacklistFile),
-        ZeroBaseShadow(ZeroBaseShadow) {}
+                                            : BlacklistFile) {}
   bool runOnModule(Module &M);
   static char ID;  // Pass identification, replacement for typeid
   virtual const char *getPassName() const {
@@ -359,19 +379,18 @@ class AddressSanitizerModule : public ModulePass {
   bool ShouldInstrumentGlobal(GlobalVariable *G);
   void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
-  size_t RedzoneSize() const {
+  size_t MinRedzoneSizeForGlobal() const {
     return RedzoneSizeForScale(Mapping.Scale);
   }
 
   bool CheckInitOrder;
   SmallString<64> BlacklistFile;
-  bool ZeroBaseShadow;
 
   OwningPtr<SpecialCaseList> BL;
   SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
   Type *IntptrTy;
   LLVMContext *C;
-  DataLayout *TD;
+  const DataLayout *DL;
   ShadowMapping Mapping;
   Function *AsanPoisonGlobals;
   Function *AsanUnpoisonGlobals;
@@ -399,7 +418,6 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
   SmallVector<AllocaInst*, 16> AllocaVec;
   SmallVector<Instruction*, 8> RetVec;
-  uint64_t TotalStackSize;
   unsigned StackAlignment;
 
   Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
@@ -409,6 +427,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
 
   // Stores a place and arguments of poisoning/unpoisoning call for alloca.
   struct AllocaPoisonCall {
     IntrinsicInst *InsBefore;
+    AllocaInst *AI;
     uint64_t Size;
     bool DoPoison;
   };
@@ -422,7 +441,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
       : F(F), ASan(ASan), DIB(*F.getParent()), C(ASan.C),
         IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
         Mapping(ASan.Mapping),
-        TotalStackSize(0), StackAlignment(1 << Mapping.Scale) {}
+        StackAlignment(1 << Mapping.Scale) {}
 
   bool runOnFunction() {
     if (!ClStack) return false;
@@ -461,8 +480,6 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
 
     StackAlignment = std::max(StackAlignment, AI.getAlignment());
     AllocaVec.push_back(&AI);
-    uint64_t AlignedSize = getAlignedAllocaSize(&AI);
-    TotalStackSize += AlignedSize;
   }
 
   /// \brief Collect lifetime intrinsic calls to check for use-after-scope
@@ -487,7 +504,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
     AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
     if (!AI) return;
     bool DoPoison = (ID == Intrinsic::lifetime_end);
-    AllocaPoisonCall APC = {&II, SizeValue, DoPoison};
+    AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
     AllocaPoisonCallVec.push_back(APC);
   }
 
@@ -496,31 +513,20 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
 
   // Check if we want (and can) handle this alloca.
   bool isInterestingAlloca(AllocaInst &AI) const {
-    return (!AI.isArrayAllocation() &&
-            AI.isStaticAlloca() &&
-            AI.getAlignment() <= RedzoneSize() &&
-            AI.getAllocatedType()->isSized());
+    return (!AI.isArrayAllocation() && AI.isStaticAlloca() &&
+            AI.getAllocatedType()->isSized() &&
+            // alloca() may be called with 0 size, ignore it.
+            getAllocaSizeInBytes(&AI) > 0);
   }
 
-  size_t RedzoneSize() const {
-    return RedzoneSizeForScale(Mapping.Scale);
-  }
   uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
     Type *Ty = AI->getAllocatedType();
-    uint64_t SizeInBytes = ASan.TD->getTypeAllocSize(Ty);
+    uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
     return SizeInBytes;
   }
-  uint64_t getAlignedSize(uint64_t SizeInBytes) const {
-    size_t RZ = RedzoneSize();
-    return ((SizeInBytes + RZ - 1) / RZ) * RZ;
-  }
-  uint64_t getAlignedAllocaSize(AllocaInst *AI) const {
-    uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
-    return getAlignedSize(SizeInBytes);
-  }
   /// Finds alloca where the value comes from.
   AllocaInst *findAllocaForValue(Value *V);
-  void poisonRedZones(const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> &IRB,
+  void poisonRedZones(const ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
                       Value *ShadowBase, bool DoPoison);
   void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
 
@@ -536,9 +542,9 @@ INITIALIZE_PASS(AddressSanitizer, "asan",
     false, false)
 FunctionPass *llvm::createAddressSanitizerFunctionPass(
     bool CheckInitOrder, bool CheckUseAfterReturn, bool CheckLifetime,
-    StringRef BlacklistFile, bool ZeroBaseShadow) {
+    StringRef BlacklistFile) {
   return new AddressSanitizer(CheckInitOrder, CheckUseAfterReturn,
-                              CheckLifetime, BlacklistFile, ZeroBaseShadow);
+                              CheckLifetime, BlacklistFile);
 }
 
 char AddressSanitizerModule::ID = 0;
@@ -546,9 +552,8 @@ INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
     "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
"ModulePass", false, false) ModulePass *llvm::createAddressSanitizerModulePass( - bool CheckInitOrder, StringRef BlacklistFile, bool ZeroBaseShadow) { - return new AddressSanitizerModule(CheckInitOrder, BlacklistFile, - ZeroBaseShadow); + bool CheckInitOrder, StringRef BlacklistFile) { + return new AddressSanitizerModule(CheckInitOrder, BlacklistFile); } static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { @@ -558,12 +563,16 @@ static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { } // \brief Create a constant for Str so that we can pass it to the run-time lib. -static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) { +static GlobalVariable *createPrivateGlobalForString( + Module &M, StringRef Str, bool AllowMerging) { Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str); - GlobalVariable *GV = new GlobalVariable(M, StrConst->getType(), true, - GlobalValue::InternalLinkage, StrConst, - kAsanGenPrefix); - GV->setUnnamedAddr(true); // Ok to merge these. + // We use private linkage for module-local strings. If they can be merged + // with another one, we set the unnamed_addr attribute. + GlobalVariable *GV = + new GlobalVariable(M, StrConst->getType(), true, + GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix); + if (AllowMerging) + GV->setUnnamedAddr(true); GV->setAlignment(1); // Strings may not be merged w/o setting align 1. return GV; } @@ -617,7 +626,7 @@ bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { Value *Cmp = IRB.CreateICmpNE(Length, Constant::getNullValue(Length->getType())); - InsertBefore = SplitBlockAndInsertIfThen(cast(Cmp), false); + InsertBefore = SplitBlockAndInsertIfThen(Cmp, InsertBefore, false); } instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true); @@ -652,6 +661,48 @@ static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) { return NULL; } +static bool isPointerOperand(Value *V) { + return V->getType()->isPointerTy() || isa(V); +} + +// This is a rough heuristic; it may cause both false positives and +// false negatives. The proper implementation requires cooperation with +// the frontend. +static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) { + if (ICmpInst *Cmp = dyn_cast(I)) { + if (!Cmp->isRelational()) + return false; + } else if (BinaryOperator *BO = dyn_cast(I)) { + if (BO->getOpcode() != Instruction::Sub) + return false; + } else { + return false; + } + if (!isPointerOperand(I->getOperand(0)) || + !isPointerOperand(I->getOperand(1))) + return false; + return true; +} + +bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) { + // If a global variable does not have dynamic initialization we don't + // have to instrument it. However, if a global does not have initializer + // at all, we assume it has dynamic initializer (in other TU). + return G->hasInitializer() && !DynamicallyInitializedGlobals.Contains(G); +} + +void +AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) { + IRBuilder<> IRB(I); + Function *F = isa(I) ? 
+  Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
+  for (int i = 0; i < 2; i++) {
+    if (Param[i]->getType()->isPointerTy())
+      Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
+  }
+  IRB.CreateCall2(F, Param[0], Param[1]);
+}
+
 void AddressSanitizer::instrumentMop(Instruction *I) {
   bool IsWrite = false;
   Value *Addr = isInterestingMemoryAccess(I, &IsWrite);
@@ -660,13 +711,19 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
     if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
       // If initialization order checking is disabled, a simple access to a
       // dynamically initialized global is always valid.
-      if (!CheckInitOrder)
-        return;
-      // If a global variable does not have dynamic initialization we don't
-      // have to instrument it. However, if a global does not have initailizer
-      // at all, we assume it has dynamic initializer (in other TU).
-      if (G->hasInitializer() && !DynamicallyInitializedGlobals.Contains(G))
+      if (!CheckInitOrder || GlobalIsLinkerInitialized(G)) {
+        NumOptimizedAccessesToGlobalVar++;
         return;
+      }
+    }
+    ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr);
+    if (CE && CE->isGEPWithNoNotionalOverIndexing()) {
+      if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
+        if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) {
+          NumOptimizedAccessesToGlobalArray++;
+          return;
+        }
+      }
     }
   }
 
@@ -674,10 +731,15 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
   Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
 
   assert(OrigTy->isSized());
-  uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
+  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
 
   assert((TypeSize % 8) == 0);
 
+  if (IsWrite)
+    NumInstrumentedWrites++;
+  else
+    NumInstrumentedReads++;
+
   // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check.
   if (TypeSize == 8  || TypeSize == 16 ||
       TypeSize == 32 || TypeSize == 64 || TypeSize == 128)
@@ -762,7 +824,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
 
   if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
     TerminatorInst *CheckTerm =
-        SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
+        SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
     assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
     BasicBlock *NextBB = CheckTerm->getSuccessor(0);
     IRB.SetInsertPoint(CheckTerm);
@@ -773,7 +835,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
     BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
     ReplaceInstWithInst(CheckTerm, NewTerm);
   } else {
-    CrashTerm = SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), true);
+    CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
   }
 
   Instruction *Crash = generateCrashCode(
@@ -825,8 +887,8 @@ bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
   //   - Need to poison all copies, not just the main thread's one.
   if (G->isThreadLocal())
     return false;
-  // For now, just ignore this Alloca if the alignment is large.
-  if (G->getAlignment() > RedzoneSize()) return false;
+  // For now, just ignore this Global if the alignment is large.
+  if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
 
   // Ignore all the globals with the names starting with "\01L_OBJC_".
   // Many of those are put into the .cstring section. The linker compresses
@@ -890,15 +952,18 @@ void AddressSanitizerModule::initializeCallbacks(Module &M) {
 
 // redzones and inserts this function into llvm.global_ctors.
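// To make the redzone sizing concrete (an illustrative sketch, assuming the
// default shadow scale of 3 and a 32-byte minimum redzone): a 12-byte global
// such as `int x[3]` is rewritten into a struct of the form
//   struct { int x[3]; char rz[52]; }  // 12 + 52 == 64, a multiple of 32
// where rz is the right redzone the run-time keeps poisoned, and a
// descriptor (address, sizes, name, module name, dynamic-init flag) is
// emitted so error reports can name the original global.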
 bool AddressSanitizerModule::runOnModule(Module &M) {
   if (!ClGlobals) return false;
-  TD = getAnalysisIfAvailable<DataLayout>();
-  if (!TD)
+
+  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  if (!DLP)
     return false;
+  DL = &DLP->getDataLayout();
+
   BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
   if (BL->isIn(M)) return false;
   C = &(M.getContext());
-  int LongSize = TD->getPointerSizeInBits();
+  int LongSize = DL->getPointerSizeInBits();
   IntptrTy = Type::getIntNTy(*C, LongSize);
-  Mapping = getShadowMapping(M, LongSize, ZeroBaseShadow);
+  Mapping = getShadowMapping(M, LongSize);
   initializeCallbacks(M);
   DynamicallyInitializedGlobals.Init(M);
 
@@ -924,8 +989,7 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
 
   StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy,
                                                IntptrTy, IntptrTy,
                                                IntptrTy, IntptrTy, NULL);
-  SmallVector<Constant *, 16> Initializers(n), DynamicInit;
-
+  SmallVector<Constant *, 16> Initializers(n);
 
   Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
   assert(CtorFunc);
@@ -933,19 +997,18 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
 
   bool HasDynamicallyInitializedGlobals = false;
 
-  GlobalVariable *ModuleName = createPrivateGlobalForString(
-      M, M.getModuleIdentifier());
   // We shouldn't merge same module names, as this string serves as unique
   // module ID in runtime.
-  ModuleName->setUnnamedAddr(false);
+  GlobalVariable *ModuleName = createPrivateGlobalForString(
+      M, M.getModuleIdentifier(), /*AllowMerging*/false);
 
   for (size_t i = 0; i < n; i++) {
     static const uint64_t kMaxGlobalRedzone = 1 << 18;
     GlobalVariable *G = GlobalsToChange[i];
     PointerType *PtrTy = cast<PointerType>(G->getType());
     Type *Ty = PtrTy->getElementType();
-    uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
-    uint64_t MinRZ = RedzoneSize();
+    uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
+    uint64_t MinRZ = MinRedzoneSizeForGlobal();
     // MinRZ <= RZ <= kMaxGlobalRedzone
     // and trying to make RZ to be ~ 1/4 of SizeInBytes.
     uint64_t RZ = std::max(MinRZ,
@@ -968,7 +1031,8 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
         NewTy, G->getInitializer(),
         Constant::getNullValue(RightRedZoneTy),
         NULL);
 
-    GlobalVariable *Name = createPrivateGlobalForString(M, G->getName());
+    GlobalVariable *Name =
+        createPrivateGlobalForString(M, G->getName(), /*AllowMerging*/true);
 
     // Create a new global variable with enough space for a redzone.
     GlobalValue::LinkageTypes Linkage = G->getLinkage();
@@ -1056,41 +1120,31 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
 
   AsanHandleNoReturnFunc = checkInterfaceFunction(M.getOrInsertFunction(
       kAsanHandleNoReturnName, IRB.getVoidTy(), NULL));
+  AsanCovFunction = checkInterfaceFunction(M.getOrInsertFunction(
+      kAsanCovName, IRB.getVoidTy(), NULL));
+  AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
+      kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
+  AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
+      kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL));
   // We insert an empty inline asm after __asan_report* to avoid callback merge.
   EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                             StringRef(""), StringRef(""),
                             /*hasSideEffects=*/true);
 }
 
-void AddressSanitizer::emitShadowMapping(Module &M, IRBuilder<> &IRB) const {
-  // Tell the values of mapping offset and scale to the run-time.
-  GlobalValue *asan_mapping_offset =
-      new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
-                         ConstantInt::get(IntptrTy, Mapping.Offset),
-                         kAsanMappingOffsetName);
-  // Read the global, otherwise it may be optimized away.
-  IRB.CreateLoad(asan_mapping_offset, true);
-
-  GlobalValue *asan_mapping_scale =
-      new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage,
-                         ConstantInt::get(IntptrTy, Mapping.Scale),
-                         kAsanMappingScaleName);
-  // Read the global, otherwise it may be optimized away.
-  IRB.CreateLoad(asan_mapping_scale, true);
-}
-
 // virtual
 bool AddressSanitizer::doInitialization(Module &M) {
   // Initialize the private fields. No one has accessed them before.
-  TD = getAnalysisIfAvailable<DataLayout>();
-
-  if (!TD)
+  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  if (!DLP)
     return false;
+  DL = &DLP->getDataLayout();
+
   BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
   DynamicallyInitializedGlobals.Init(M);
 
   C = &(M.getContext());
-  LongSize = TD->getPointerSizeInBits();
+  LongSize = DL->getPointerSizeInBits();
   IntptrTy = Type::getIntNTy(*C, LongSize);
 
   AsanCtorFunction = Function::Create(
@@ -1104,8 +1158,7 @@ bool AddressSanitizer::doInitialization(Module &M) {
   AsanInitFunction->setLinkage(Function::ExternalLinkage);
   IRB.CreateCall(AsanInitFunction);
 
-  Mapping = getShadowMapping(M, LongSize, ZeroBaseShadow);
-  emitShadowMapping(M, IRB);
+  Mapping = getShadowMapping(M, LongSize);
 
   appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority);
   return true;
@@ -1127,6 +1180,71 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
   return false;
 }
 
+void AddressSanitizer::InjectCoverageAtBlock(Function &F, BasicBlock &BB) {
+  BasicBlock::iterator IP = BB.getFirstInsertionPt(), BE = BB.end();
+  // Skip static allocas at the top of the entry block so they don't become
+  // dynamic when we split the block. If we used our optimized stack layout,
+  // then there will only be one alloca and it will come first.
+  for (; IP != BE; ++IP) {
+    AllocaInst *AI = dyn_cast<AllocaInst>(IP);
+    if (!AI || !AI->isStaticAlloca())
+      break;
+  }
+
+  IRBuilder<> IRB(IP);
+  Type *Int8Ty = IRB.getInt8Ty();
+  GlobalVariable *Guard = new GlobalVariable(
+      *F.getParent(), Int8Ty, false, GlobalValue::PrivateLinkage,
+      Constant::getNullValue(Int8Ty), "__asan_gen_cov_" + F.getName());
+  LoadInst *Load = IRB.CreateLoad(Guard);
+  Load->setAtomic(Monotonic);
+  Load->setAlignment(1);
+  Value *Cmp = IRB.CreateICmpEQ(Constant::getNullValue(Int8Ty), Load);
+  Instruction *Ins = SplitBlockAndInsertIfThen(
+      Cmp, IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
+  IRB.SetInsertPoint(Ins);
+  // __sanitizer_cov takes no arguments; the run-time relies on
+  // GET_CALLER_PC, and having the PC of the block's first instruction
+  // is just nice.
+  Instruction *Call = IRB.CreateCall(AsanCovFunction);
+  Call->setDebugLoc(IP->getDebugLoc());
+  StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int8Ty, 1), Guard);
+  Store->setAtomic(Monotonic);
+  Store->setAlignment(1);
+}
+
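In C++ terms, the guard sequence injected above amounts to the following (a sketch; `Guard` stands for the generated `__asan_gen_cov_*` global, and the IR's monotonic ordering corresponds to std::memory_order_relaxed):

  #include <atomic>

  extern "C" void __sanitizer_cov();        // provided by the ASan run-time
  static std::atomic<unsigned char> Guard;  // one per instrumented block

  inline void CoverageProbe() {
    if (Guard.load(std::memory_order_relaxed) == 0) {
      __sanitizer_cov();  // records the caller's PC; safe to call repeatedly
      Guard.store(1, std::memory_order_relaxed);
    }
  }

The relaxed accesses keep the probe cheap; the worst case when threads race on a cold block is a few redundant __sanitizer_cov calls, which the run-time tolerates.
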
+// Poor man's coverage that works with ASan.
+// We create a private Guard variable per function (or per block with
+// -asan-coverage=2) and inject this code into the entry block
+// (-asan-coverage=1) or all blocks (-asan-coverage=2):
+// if (!*Guard) {
+//    __sanitizer_cov();
+//    *Guard = 1;
+// }
+// The accesses to Guard are atomic. The rest of the logic is
+// in __sanitizer_cov (it's fine to call it more than once).
+//
+// This coverage implementation provides very limited data:
+// it only tells if a given function (block) was ever executed.
+// No counters, no per-edge data.
+// But for many use cases this is what we need and the added slowdown
+// is negligible. This simple implementation will probably be obsoleted
+// by the upcoming Clang-based coverage implementation.
+// By having it here and now we hope to
+//  a) get the functionality to users earlier and
+//  b) collect usage statistics to help improve Clang coverage design.
+bool AddressSanitizer::InjectCoverage(Function &F,
+                                      const ArrayRef<BasicBlock *> AllBlocks) {
+  if (!ClCoverage) return false;
+
+  if (ClCoverage == 1) {
+    InjectCoverageAtBlock(F, F.getEntryBlock());
+  } else {
+    for (size_t i = 0, n = AllBlocks.size(); i < n; i++)
+      InjectCoverageAtBlock(F, *AllBlocks[i]);
+  }
+  return true;
+}
+
 bool AddressSanitizer::runOnFunction(Function &F) {
   if (BL->isIn(F)) return false;
   if (&F == AsanCtorFunction) return false;
@@ -1148,12 +1266,15 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   SmallSet<Value*, 16> TempsToInstrument;
   SmallVector<Instruction*, 16> ToInstrument;
   SmallVector<Instruction*, 8> NoReturnCalls;
+  SmallVector<BasicBlock*, 16> AllBlocks;
+  SmallVector<Instruction*, 8> PointerComparisonsOrSubtracts;
   int NumAllocas = 0;
   bool IsWrite;
 
   // Fill the set of memory operations to instrument.
   for (Function::iterator FI = F.begin(), FE = F.end();
        FI != FE; ++FI) {
+    AllBlocks.push_back(FI);
     TempsToInstrument.clear();
     int NumInsnsPerBB = 0;
     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
@@ -1164,6 +1285,10 @@ bool AddressSanitizer::runOnFunction(Function &F) {
         if (!TempsToInstrument.insert(Addr))
           continue;  // We've seen this temp in the current BB.
         }
+      } else if (ClInvalidPointerPairs &&
+                 isInterestingPointerComparisonOrSubtraction(BI)) {
+        PointerComparisonsOrSubtracts.push_back(BI);
+        continue;
       } else if (isa<MemIntrinsic>(BI) && ClMemIntrin) {
         // ok, take it.
       } else {
@@ -1221,7 +1346,16 @@ bool AddressSanitizer::runOnFunction(Function &F) {
     IRB.CreateCall(AsanHandleNoReturnFunc);
   }
 
+  for (size_t i = 0, n = PointerComparisonsOrSubtracts.size(); i != n; i++) {
+    instrumentPointerComparisonOrSubtraction(PointerComparisonsOrSubtracts[i]);
+    NumInstrumented++;
+  }
+
   bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
+
+  if (InjectCoverage(F, AllBlocks))
+    res = true;
+
   DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");
 
   if (ClKeepUninstrumented) {
@@ -1241,32 +1375,6 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   return res;
 }
 
-static uint64_t ValueForPoison(uint64_t PoisonByte, size_t ShadowRedzoneSize) {
-  if (ShadowRedzoneSize == 1) return PoisonByte;
-  if (ShadowRedzoneSize == 2) return (PoisonByte << 8) + PoisonByte;
-  if (ShadowRedzoneSize == 4)
-    return (PoisonByte << 24) + (PoisonByte << 16) +
-        (PoisonByte << 8) + (PoisonByte);
-  llvm_unreachable("ShadowRedzoneSize is either 1, 2 or 4");
-}
-
-static void PoisonShadowPartialRightRedzone(uint8_t *Shadow,
-                                            size_t Size,
-                                            size_t RZSize,
-                                            size_t ShadowGranularity,
-                                            uint8_t Magic) {
-  for (size_t i = 0; i < RZSize;
-       i+= ShadowGranularity, Shadow++) {
-    if (i + ShadowGranularity <= Size) {
-      *Shadow = 0;  // fully addressable
-    } else if (i >= Size) {
-      *Shadow = Magic;  // unaddressable
-    } else {
-      *Shadow = Size - i;  // first Size-i bytes are addressable
-    }
-  }
-}
-
 // Workaround for bug 11395: we don't want to instrument stack in functions
 // with large assembly blobs (32-bit only), otherwise reg alloc may crash.
 // FIXME: remove once the bug 11395 is fixed.
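The next hunk replaces the magic-value arithmetic deleted above: the frame's shadow contents now arrive as a plain byte array (built by ComputeASanStackFrameLayout, used further below), and poisonRedZones just writes that array out with the widest stores it can. The packing logic as a standalone sketch (assumes a little-endian target; `emitStore` is a hypothetical stand-in for the IRBuilder store):

  #include <cstdint>
  #include <vector>

  // Write a shadow byte array using 8-byte stores first, then 4-, 2-, 1-byte
  // stores for the tail; spans that are all zero (addressable memory) are
  // skipped, so a typical frame needs only a handful of stores.
  template <typename EmitStore>
  void storeShadowBytes(const std::vector<uint8_t> &ShadowBytes,
                        EmitStore emitStore) {
    size_t n = ShadowBytes.size(), i = 0;
    for (size_t Sz = 8; Sz != 0; Sz /= 2) {
      for (; i + Sz - 1 < n; i += Sz) {
        uint64_t Val = 0;
        for (size_t j = 0; j < Sz; j++)
          Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);  // LE packing
        if (Val)
          emitStore(/*Offset=*/i, /*WidthInBytes=*/Sz, Val);
      }
    }
  }
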
@@ -1296,65 +1404,31 @@ void FunctionStackPoisoner::initializeCallbacks(Module &M) {
       kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
       IntptrTy, IntptrTy, NULL));
 }
 
-void FunctionStackPoisoner::poisonRedZones(
-    const ArrayRef<AllocaInst*> &AllocaVec, IRBuilder<> &IRB, Value *ShadowBase,
-    bool DoPoison) {
-  size_t ShadowRZSize = RedzoneSize() >> Mapping.Scale;
-  assert(ShadowRZSize >= 1 && ShadowRZSize <= 4);
-  Type *RZTy = Type::getIntNTy(*C, ShadowRZSize * 8);
-  Type *RZPtrTy = PointerType::get(RZTy, 0);
-
-  Value *PoisonLeft  = ConstantInt::get(RZTy,
-    ValueForPoison(DoPoison ? kAsanStackLeftRedzoneMagic : 0LL, ShadowRZSize));
-  Value *PoisonMid   = ConstantInt::get(RZTy,
-    ValueForPoison(DoPoison ? kAsanStackMidRedzoneMagic : 0LL, ShadowRZSize));
-  Value *PoisonRight = ConstantInt::get(RZTy,
-    ValueForPoison(DoPoison ? kAsanStackRightRedzoneMagic : 0LL, ShadowRZSize));
-
-  // poison the first red zone.
-  IRB.CreateStore(PoisonLeft, IRB.CreateIntToPtr(ShadowBase, RZPtrTy));
-
-  // poison all other red zones.
-  uint64_t Pos = RedzoneSize();
-  for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
-    AllocaInst *AI = AllocaVec[i];
-    uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
-    uint64_t AlignedSize = getAlignedAllocaSize(AI);
-    assert(AlignedSize - SizeInBytes < RedzoneSize());
-    Value *Ptr = NULL;
-
-    Pos += AlignedSize;
-
-    assert(ShadowBase->getType() == IntptrTy);
-    if (SizeInBytes < AlignedSize) {
-      // Poison the partial redzone at right
-      Ptr = IRB.CreateAdd(
-          ShadowBase, ConstantInt::get(IntptrTy,
-                                       (Pos >> Mapping.Scale) - ShadowRZSize));
-      size_t AddressableBytes = RedzoneSize() - (AlignedSize - SizeInBytes);
-      uint32_t Poison = 0;
-      if (DoPoison) {
-        PoisonShadowPartialRightRedzone((uint8_t*)&Poison, AddressableBytes,
-                                        RedzoneSize(),
-                                        1ULL << Mapping.Scale,
-                                        kAsanStackPartialRedzoneMagic);
-        Poison =
-            ASan.TD->isLittleEndian()
-                ? support::endian::byte_swap<uint32_t, support::little>(Poison)
-                : support::endian::byte_swap<uint32_t, support::big>(Poison);
+void
+FunctionStackPoisoner::poisonRedZones(const ArrayRef<uint8_t> ShadowBytes,
+                                      IRBuilder<> &IRB, Value *ShadowBase,
+                                      bool DoPoison) {
+  size_t n = ShadowBytes.size();
+  size_t i = 0;
+  // We need to (un)poison n bytes of stack shadow. Poison as many as we can
+  // using 64-bit stores (if we are on 64-bit arch), then poison the rest
+  // with 32-bit stores, then with 16-bit stores, then with 8-bit stores.
+  for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
+       LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
+    for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
+      uint64_t Val = 0;
+      for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
+        if (ASan.DL->isLittleEndian())
+          Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
+        else
+          Val = (Val << 8) | ShadowBytes[i + j];
       }
-      Value *PartialPoison = ConstantInt::get(RZTy, Poison);
-      IRB.CreateStore(PartialPoison, IRB.CreateIntToPtr(Ptr, RZPtrTy));
+      if (!Val) continue;
+      Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
+      Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
+      Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
+      IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
     }
-
-    // Poison the full redzone at right.
-    Ptr = IRB.CreateAdd(ShadowBase,
-                        ConstantInt::get(IntptrTy, Pos >> Mapping.Scale));
-    bool LastAlloca = (i == AllocaVec.size() - 1);
-    Value *Poison = LastAlloca ? PoisonRight : PoisonMid;
-    IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, RZPtrTy));
-
-    Pos += RedzoneSize();
   }
 }
 
@@ -1386,70 +1460,86 @@ void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
 }
 
 void FunctionStackPoisoner::poisonStack() {
-  uint64_t LocalStackSize = TotalStackSize +
-                            (AllocaVec.size() + 1) * RedzoneSize();
-
-  bool DoStackMalloc = ASan.CheckUseAfterReturn
-      && LocalStackSize <= kMaxStackMallocSize;
   int StackMallocIdx = -1;
 
   assert(AllocaVec.size() > 0);
   Instruction *InsBefore = AllocaVec[0];
   IRBuilder<> IRB(InsBefore);
 
+  SmallVector<ASanStackVariableDescription, 16> SVD;
+  SVD.reserve(AllocaVec.size());
+  for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
+    AllocaInst *AI = AllocaVec[i];
+    ASanStackVariableDescription D = { AI->getName().data(),
+                                       getAllocaSizeInBytes(AI),
+                                       AI->getAlignment(), AI, 0};
+    SVD.push_back(D);
+  }
+  // Minimal header size (left redzone) is 4 pointers,
+  // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
+  size_t MinHeaderSize = ASan.LongSize / 2;
+  ASanStackFrameLayout L;
+  ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
+  DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
+  uint64_t LocalStackSize = L.FrameSize;
+  bool DoStackMalloc =
+      ASan.CheckUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
+
   Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
   AllocaInst *MyAlloca =
       new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
-  if (ClRealignStack && StackAlignment < RedzoneSize())
-    StackAlignment = RedzoneSize();
-  MyAlloca->setAlignment(StackAlignment);
+  assert((ClRealignStack & (ClRealignStack - 1)) == 0);
+  size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
+  MyAlloca->setAlignment(FrameAlignment);
   assert(MyAlloca->isStaticAlloca());
   Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy);
   Value *LocalStackBase = OrigStackBase;
 
   if (DoStackMalloc) {
+    // LocalStackBase = OrigStackBase
+    // if (__asan_option_detect_stack_use_after_return)
+    //   LocalStackBase = __asan_stack_malloc_N(LocalStackBase, OrigStackBase);
     StackMallocIdx = StackMallocSizeClass(LocalStackSize);
     assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
-    LocalStackBase = IRB.CreateCall2(AsanStackMallocFunc[StackMallocIdx],
+    Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal(
+        kAsanOptionDetectUAR, IRB.getInt32Ty());
+    Value *Cmp = IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
+                                  Constant::getNullValue(IRB.getInt32Ty()));
+    Instruction *Term = SplitBlockAndInsertIfThen(Cmp, InsBefore, false);
+    BasicBlock *CmpBlock = cast<Instruction>(Cmp)->getParent();
+    IRBuilder<> IRBIf(Term);
+    LocalStackBase = IRBIf.CreateCall2(
+        AsanStackMallocFunc[StackMallocIdx],
         ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
+    BasicBlock *SetBlock = cast<Instruction>(LocalStackBase)->getParent();
+    IRB.SetInsertPoint(InsBefore);
+    PHINode *Phi = IRB.CreatePHI(IntptrTy, 2);
+    Phi->addIncoming(OrigStackBase, CmpBlock);
+    Phi->addIncoming(LocalStackBase, SetBlock);
+    LocalStackBase = Phi;
   }
 
-  // This string will be parsed by the run-time (DescribeAddressIfStack).
-  SmallString<2048> StackDescriptionStorage;
-  raw_svector_ostream StackDescription(StackDescriptionStorage);
-  StackDescription << AllocaVec.size() << " ";
-
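// For reference, the description string (now produced by
// ComputeASanStackFrameLayout, previously by the code removed above) keeps
// the format the run-time parses in DescribeAddressIfStack: the number of
// variables, then "Offset Size NameLen Name" per variable. A frame with an
// int `a` and a char `buf[16]` might serialize as (illustrative offsets):
//   2 32 4 1 a 64 16 3 buf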
   // Insert poison calls for lifetime intrinsics for alloca.
   bool HavePoisonedAllocas = false;
   for (size_t i = 0, n = AllocaPoisonCallVec.size(); i < n; i++) {
     const AllocaPoisonCall &APC = AllocaPoisonCallVec[i];
-    IntrinsicInst *II = APC.InsBefore;
-    AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
-    assert(AI);
-    IRBuilder<> IRB(II);
-    poisonAlloca(AI, APC.Size, IRB, APC.DoPoison);
+    assert(APC.InsBefore);
+    assert(APC.AI);
+    IRBuilder<> IRB(APC.InsBefore);
+    poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
     HavePoisonedAllocas |= APC.DoPoison;
   }
 
-  uint64_t Pos = RedzoneSize();
   // Replace Alloca instructions with base+offset.
-  for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
-    AllocaInst *AI = AllocaVec[i];
-    uint64_t SizeInBytes = getAllocaSizeInBytes(AI);
-    StringRef Name = AI->getName();
-    StackDescription << Pos << " " << SizeInBytes << " "
-                     << Name.size() << " " << Name << " ";
-    uint64_t AlignedSize = getAlignedAllocaSize(AI);
-    assert((AlignedSize % RedzoneSize()) == 0);
+  for (size_t i = 0, n = SVD.size(); i < n; i++) {
+    AllocaInst *AI = SVD[i].AI;
     Value *NewAllocaPtr = IRB.CreateIntToPtr(
-            IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Pos)),
-            AI->getType());
+        IRB.CreateAdd(LocalStackBase,
+                      ConstantInt::get(IntptrTy, SVD[i].Offset)),
+        AI->getType());
     replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
     AI->replaceAllUsesWith(NewAllocaPtr);
-    Pos += AlignedSize + RedzoneSize();
   }
-  assert(Pos == LocalStackSize);
 
   // The left-most redzone has enough space for at least 4 pointers.
   // Write the Magic value to redzone[0].
@@ -1461,7 +1551,8 @@ void FunctionStackPoisoner::poisonStack() {
       IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
       IntptrPtrTy);
   GlobalVariable *StackDescriptionGlobal =
-      createPrivateGlobalForString(*F.getParent(), StackDescription.str());
+      createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
+                                   /*AllowMerging*/true);
   Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
                                              IntptrTy);
   IRB.CreateStore(Description, BasePlus1);
@@ -1474,30 +1565,33 @@ void FunctionStackPoisoner::poisonStack() {
 
   // Poison the stack redzones at the entry.
   Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
-  poisonRedZones(AllocaVec, IRB, ShadowBase, true);
+  poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);
 
-  // Unpoison the stack before all ret instructions.
+  // (Un)poison the stack before all ret instructions.
   for (size_t i = 0, n = RetVec.size(); i < n; i++) {
     Instruction *Ret = RetVec[i];
     IRBuilder<> IRBRet(Ret);
     // Mark the current frame as retired.
     IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                        BasePlus0);
-    // Unpoison the stack.
-    poisonRedZones(AllocaVec, IRBRet, ShadowBase, false);
     if (DoStackMalloc) {
       assert(StackMallocIdx >= 0);
-      // In use-after-return mode, mark the whole stack frame unaddressable.
+      // if LocalStackBase != OrigStackBase:
+      //     // In use-after-return mode, poison the whole stack frame.
+      //     if StackMallocIdx <= 4
+      //         // For small sizes inline the whole thing:
+      //         memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
+      //         **SavedFlagPtr(LocalStackBase) = 0
+      //     else
+      //         __asan_stack_free_N(LocalStackBase, OrigStackBase)
+      // else
+      //     <This is not a fake stack; unpoison the redzones>
+      Value *Cmp = IRBRet.CreateICmpNE(LocalStackBase, OrigStackBase);
+      TerminatorInst *ThenTerm, *ElseTerm;
+      SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
+
+      IRBuilder<> IRBPoison(ThenTerm);
       if (StackMallocIdx <= 4) {
-        // For small sizes inline the whole thing:
-        // if LocalStackBase != OrigStackBase:
-        //     memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
-        //     **SavedFlagPtr(LocalStackBase) = 0
-        // FIXME: if LocalStackBase != OrigStackBase don't call poisonRedZones.
-        Value *Cmp = IRBRet.CreateICmpNE(LocalStackBase, OrigStackBase);
-        TerminatorInst *PoisonTerm =
-            SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false);
-        IRBuilder<> IRBPoison(PoisonTerm);
         int ClassSize = kMinStackMallocSize << StackMallocIdx;
         SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
                                            ClassSize >> Mapping.Scale);
@@ -1511,15 +1605,20 @@ void FunctionStackPoisoner::poisonStack() {
             IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
       } else {
         // For larger frames call __asan_stack_free_*.
-        IRBRet.CreateCall3(AsanStackFreeFunc[StackMallocIdx], LocalStackBase,
-                           ConstantInt::get(IntptrTy, LocalStackSize),
-                           OrigStackBase);
+        IRBPoison.CreateCall3(AsanStackFreeFunc[StackMallocIdx], LocalStackBase,
+                              ConstantInt::get(IntptrTy, LocalStackSize),
+                              OrigStackBase);
       }
+
+      IRBuilder<> IRBElse(ElseTerm);
+      poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
     } else if (HavePoisonedAllocas) {
       // If we poisoned some allocas in llvm.lifetime analysis,
      // unpoison whole stack frame now.
       assert(LocalStackBase == OrigStackBase);
       poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
+    } else {
+      poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
     }
   }
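Taken together, the stack hunks make each instrumented frame behave roughly like the pseudo-C below (a sketch of the generated control flow, not literal output; the names follow the pass):

  //   char frame[L.FrameSize] __attribute__((aligned(FrameAlignment)));
  //   uintptr_t base = (uintptr_t)frame;                  // OrigStackBase
  //   if (__asan_option_detect_stack_use_after_return)    // runtime switch
  //     base = __asan_stack_malloc_N(L.FrameSize, base);  // fake-stack frame
  //   // write frame header: magic, description string, &F
  //   poison_redzones(shadow(base));                      // L.ShadowBytes
  //   ... body runs; each former alloca lives at base + SVD[i].Offset ...
  //   // on every return:
  //   *(uintptr_t *)base = kRetiredStackFrameMagic;
  //   if (base != (uintptr_t)frame)     // fake frame: poison it entirely
  //     memset(shadow(base), kAsanStackAfterReturnMagic, ShadowSize);
  //     // (or __asan_stack_free_N for the larger size classes)
  //   else                              // ordinary frame: just unpoison
  //     unpoison_redzones(shadow(base));

Keeping the poison/unpoison paths behind the base comparison is what lets the run-time later diagnose a use-after-return: the retired fake frame stays poisoned until its size class is recycled.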