X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;ds=sidebyside;f=lib%2FTransforms%2FInstrumentation%2FMemorySanitizer.cpp;h=218e3e96c23977b4368c44f42195bae04c9e7187;hb=b2bc86f25144aa907b977e27fd93e316e15500d6;hp=c2aa1e2f772ec7462ac643110bc8065d798905c4;hpb=529919ff310cbfce1ba55ea252ff738d5b56b93d;p=oota-llvm.git

diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index c2aa1e2f772..218e3e96c23 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -148,7 +148,7 @@ static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
        cl::desc("poison uninitialized stack variables with a call"),
        cl::Hidden, cl::init(false));
 static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
-       cl::desc("poison uninitialized stack variables with the given patter"),
+       cl::desc("poison uninitialized stack variables with the given pattern"),
        cl::Hidden, cl::init(0xff));
 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
        cl::desc("poison undef temps"),
@@ -191,6 +191,9 @@ static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
        cl::desc("Insert checks for constant shadow values"),
        cl::Hidden, cl::init(false));
 
+static const char *const kMsanModuleCtorName = "msan.module_ctor";
+static const char *const kMsanInitName = "__msan_init";
+
 namespace {
 
 // Memory map parameters used in application-to-shadow address calculation.
@@ -219,10 +222,17 @@ static const MemoryMapParams Linux_I386_MemoryMapParams = {
 
 // x86_64 Linux
 static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
+#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
   0x400000000000,  // AndMask
   0,               // XorMask (not used)
   0,               // ShadowBase (not used)
   0x200000000000,  // OriginBase
+#else
+  0,               // AndMask (not used)
+  0x500000000000,  // XorMask
+  0,               // ShadowBase (not used)
+  0x100000000000,  // OriginBase
+#endif
 };
 
 // mips64 Linux
@@ -233,6 +243,22 @@ static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
   0x002000000000,  // OriginBase
 };
 
+// ppc64 Linux
+static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
+  0x200000000000,  // AndMask
+  0x100000000000,  // XorMask
+  0x080000000000,  // ShadowBase
+  0x1C0000000000,  // OriginBase
+};
+
+// aarch64 Linux
+static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
+  0,               // AndMask (not used)
+  0x06000000000,   // XorMask
+  0,               // ShadowBase (not used)
+  0x01000000000,   // OriginBase
+};
+
 // i386 FreeBSD
 static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
   0x000180000000,  // AndMask
@@ -255,10 +281,20 @@ static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
 };
 
 static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
-  NULL,
+  nullptr,
   &Linux_MIPS64_MemoryMapParams,
 };
 
+static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
+  nullptr,
+  &Linux_PowerPC64_MemoryMapParams,
+};
+
+static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
+  nullptr,
+  &Linux_AArch64_MemoryMapParams,
+};
+
 static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
   &FreeBSD_I386_MemoryMapParams,
   &FreeBSD_X86_64_MemoryMapParams,
@@ -332,12 +368,13 @@ class MemorySanitizer : public FunctionPass {
   MDNode *OriginStoreWeights;
   /// \brief An empty volatile inline asm that prevents callback merge.
   InlineAsm *EmptyAsm;
+  Function *MsanCtorFunction;
 
   friend struct MemorySanitizerVisitor;
   friend struct VarArgAMD64Helper;
   friend struct VarArgMIPS64Helper;
 };
-}  // namespace
+}  // anonymous namespace
 
 char MemorySanitizer::ID = 0;
 INITIALIZE_PASS(MemorySanitizer, "msan",
@@ -360,7 +397,6 @@ static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                             GlobalValue::PrivateLinkage, StrConst, "");
 }
 
-
 /// \brief Insert extern declaration of runtime-provided functions and globals.
 void MemorySanitizer::initializeCallbacks(Module &M) {
   // Only do this once.
@@ -475,6 +511,14 @@ bool MemorySanitizer::doInitialization(Module &M) {
     case Triple::mips64el:
       MapParams = Linux_MIPS_MemoryMapParams.bits64;
       break;
+    case Triple::ppc64:
+    case Triple::ppc64le:
+      MapParams = Linux_PowerPC_MemoryMapParams.bits64;
+      break;
+    case Triple::aarch64:
+    case Triple::aarch64_be:
+      MapParams = Linux_ARM_MemoryMapParams.bits64;
+      break;
     default:
       report_fatal_error("unsupported architecture");
   }
@@ -491,9 +535,12 @@ bool MemorySanitizer::doInitialization(Module &M) {
   ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
   OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
 
-  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
-  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
-                      "__msan_init", IRB.getVoidTy(), nullptr)), 0);
+  std::tie(MsanCtorFunction, std::ignore) =
+      createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
+                                          /*InitArgTypes=*/{},
+                                          /*InitArgs=*/{});
+
+  appendToGlobalCtors(M, MsanCtorFunction, 0);
 
   if (TrackOrigins)
     new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
@@ -623,8 +670,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       Value *IntptrOriginPtr =
           IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
       for (unsigned i = 0; i < Size / IntptrSize; ++i) {
-        Value *Ptr =
-            i ? IRB.CreateConstGEP1_32(IntptrOriginPtr, i) : IntptrOriginPtr;
+        Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
+                       : IntptrOriginPtr;
         IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
         Ofs += IntptrSize / kOriginSize;
         CurrentAlignment = IntptrAlignment;
       }
     }
@@ -632,7 +679,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
-      Value *GEP = i ? IRB.CreateConstGEP1_32(OriginPtr, i) : OriginPtr;
+      Value *GEP =
+          i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
       IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
       CurrentAlignment = kMinOriginAlignment;
     }
   }
@@ -665,14 +713,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
       Value *ConvertedShadow2 = IRB.CreateZExt(
          ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
-      IRB.CreateCall3(Fn, ConvertedShadow2,
-                      IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
-                      Origin);
+      IRB.CreateCall(Fn, {ConvertedShadow2,
+                          IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
+                          Origin});
     } else {
       Value *Cmp = IRB.CreateICmpNE(
           ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
       Instruction *CheckTerm = SplitBlockAndInsertIfThen(
-          Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
+          Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
       IRBuilder<> IRBNew(CheckTerm);
       paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
                   getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
@@ -720,8 +768,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                       MS.OriginTLS);
     }
-    IRB.CreateCall(MS.WarningFn);
-    IRB.CreateCall(MS.EmptyAsm);
+    IRB.CreateCall(MS.WarningFn, {});
+    IRB.CreateCall(MS.EmptyAsm, {});
     // FIXME: Insert UnreachableInst if !ClKeepGoing?
     // This may invalidate some of the following checks and needs to be done
     // at the very end.
@@ -737,9 +785,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       Value *Fn = MS.MaybeWarningFn[SizeIndex];
       Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
-      IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
+      IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
                       ? Origin
-                      : (Value *)IRB.getInt32(0));
+                      : (Value *)IRB.getInt32(0)});
     } else {
       Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                     getCleanShadow(ConvertedShadow), "_mscmp");
@@ -752,8 +800,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                         MS.OriginTLS);
       }
-      IRB.CreateCall(MS.WarningFn);
-      IRB.CreateCall(MS.EmptyAsm);
+      IRB.CreateCall(MS.WarningFn, {});
+      IRB.CreateCall(MS.EmptyAsm, {});
       DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
     }
   }
@@ -868,16 +916,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   ///
   /// Offset = (Addr & ~AndMask) ^ XorMask
   Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
+    Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
+
     uint64_t AndMask = MS.MapParams->AndMask;
-    assert(AndMask != 0 && "AndMask shall be specified");
-    Value *OffsetLong =
-      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
-                    ConstantInt::get(MS.IntptrTy, ~AndMask));
+    if (AndMask)
+      OffsetLong =
+          IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
 
     uint64_t XorMask = MS.MapParams->XorMask;
-    if (XorMask != 0)
-      OffsetLong = IRB.CreateXor(OffsetLong,
-                                 ConstantInt::get(MS.IntptrTy, XorMask));
+    if (XorMask)
+      OffsetLong =
+          IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
     return OffsetLong;
   }
@@ -1314,6 +1363,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   }
 
   void visitBitCastInst(BitCastInst &I) {
+    // Special case: if this is the bitcast (there is exactly 1 allowed) between
+    // a musttail call and a ret, don't instrument. New instructions are not
+    // allowed after a musttail call.
+    if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
+      if (CI->isMustTailCall())
+        return;
     IRBuilder<> IRB(&I);
     setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
     setOrigin(&I, getOrigin(&I, 0));
   }
@@ -1545,18 +1600,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       Type *EltTy = Ty->getSequentialElementType();
       SmallVector<Constant *, 16> Elements;
       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
-        ConstantInt *Elt =
-            dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx));
-        APInt V = Elt->getValue();
-        APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
-        Elements.push_back(ConstantInt::get(EltTy, V2));
+        if (ConstantInt *Elt =
+                dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
+          APInt V = Elt->getValue();
+          APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
+          Elements.push_back(ConstantInt::get(EltTy, V2));
+        } else {
+          Elements.push_back(ConstantInt::get(EltTy, 1));
+        }
       }
       ShadowMul = ConstantVector::get(Elements);
     } else {
-      ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg);
-      APInt V = Elt->getValue();
-      APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
-      ShadowMul = ConstantInt::get(Elt->getType(), V2);
+      if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
+        APInt V = Elt->getValue();
+        APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
+        ShadowMul = ConstantInt::get(Ty, V2);
+      } else {
+        ShadowMul = ConstantInt::get(Ty, 1);
+      }
     }
 
     IRBuilder<> IRB(&I);
@@ -1705,25 +1766,30 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
 
   /// \brief Instrument signed relational comparisons.
   ///
-  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
-  /// propagating the highest bit of the shadow. Everything else is delegated
-  /// to handleShadowOr().
+  /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
+  /// bit of the shadow. Everything else is delegated to handleShadowOr().
   void handleSignedRelationalComparison(ICmpInst &I) {
-    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
-    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
-    Value* op = nullptr;
-    CmpInst::Predicate pre = I.getPredicate();
-    if (constOp0 && constOp0->isNullValue() &&
-        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
-      op = I.getOperand(1);
-    } else if (constOp1 && constOp1->isNullValue() &&
-               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
+    Constant *constOp;
+    Value *op = nullptr;
+    CmpInst::Predicate pre;
+    if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
       op = I.getOperand(0);
+      pre = I.getPredicate();
+    } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
+      op = I.getOperand(1);
+      pre = I.getSwappedPredicate();
+    } else {
+      handleShadowOr(I);
+      return;
     }
-    if (op) {
+
+    if ((constOp->isNullValue() &&
+         (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
+        (constOp->isAllOnesValue() &&
+         (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
       IRBuilder<> IRB(&I);
-      Value* Shadow =
-        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
+      Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
+                                        "_msprop_icmp_s");
       setShadow(&I, Shadow);
       setOrigin(&I, getOrigin(op));
     } else {
@@ -1794,11 +1860,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   /// Similar situation exists for memcpy and memset.
   void visitMemMoveInst(MemMoveInst &I) {
     IRBuilder<> IRB(&I);
-    IRB.CreateCall3(
-      MS.MemmoveFn,
-      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
-      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
-      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+    IRB.CreateCall(
+        MS.MemmoveFn,
+        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+         IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
     I.eraseFromParent();
   }
 
@@ -1808,22 +1874,22 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   // alignment.
   void visitMemCpyInst(MemCpyInst &I) {
     IRBuilder<> IRB(&I);
-    IRB.CreateCall3(
-      MS.MemcpyFn,
-      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
-      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
-      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+    IRB.CreateCall(
+        MS.MemcpyFn,
+        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+         IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
     I.eraseFromParent();
   }
 
   // Same as memcpy.
   void visitMemSetInst(MemSetInst &I) {
     IRBuilder<> IRB(&I);
-    IRB.CreateCall3(
-      MS.MemsetFn,
-      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
-      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
-      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+    IRB.CreateCall(
+        MS.MemsetFn,
+        {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
+         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
     I.eraseFromParent();
   }
 
@@ -1835,25 +1901,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     VAHelper->visitVACopyInst(I);
   }
 
-  enum IntrinsicKind {
-    IK_DoesNotAccessMemory,
-    IK_OnlyReadsMemory,
-    IK_WritesMemory
-  };
-
-  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
-    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
-    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
-    const int OnlyReadsMemory = IK_OnlyReadsMemory;
-    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
-    const int UnknownModRefBehavior = IK_WritesMemory;
-#define GET_INTRINSIC_MODREF_BEHAVIOR
-#define ModRefBehavior IntrinsicKind
-#include "llvm/IR/Intrinsics.gen"
-#undef ModRefBehavior
-#undef GET_INTRINSIC_MODREF_BEHAVIOR
-  }
-
   /// \brief Handle vector store-like intrinsics.
   ///
   /// Instrument intrinsics that look like a simple SIMD store: writes memory,
@@ -1953,17 +2000,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (NumArgOperands == 0)
       return false;
 
-    Intrinsic::ID iid = I.getIntrinsicID();
-    IntrinsicKind IK = getIntrinsicKind(iid);
-    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
-    bool WritesMemory = IK == IK_WritesMemory;
-    assert(!(OnlyReadsMemory && WritesMemory));
-
     if (NumArgOperands == 2 &&
         I.getArgOperand(0)->getType()->isPointerTy() &&
         I.getArgOperand(1)->getType()->isVectorTy() &&
         I.getType()->isVoidTy() &&
-        WritesMemory) {
+        !I.onlyReadsMemory()) {
       // This looks like a vector store.
       return handleVectorStoreIntrinsic(I);
     }
@@ -1971,12 +2012,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
-        OnlyReadsMemory) {
+        I.onlyReadsMemory()) {
      // This looks like a vector load.
       return handleVectorLoadIntrinsic(I);
     }
 
-    if (!OnlyReadsMemory && !WritesMemory)
+    if (I.doesNotAccessMemory())
       if (maybeHandleSimpleNomemIntrinsic(I))
         return true;
 
@@ -2014,6 +2055,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     Value *CopyOp, *ConvertOp;
 
     switch (I.getNumArgOperands()) {
+    case 3:
+      assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
     case 2:
       CopyOp = I.getArgOperand(0);
       ConvertOp = I.getArgOperand(1);
@@ -2104,8 +2147,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                   : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
     Value *V1 = I.getOperand(0);
     Value *V2 = I.getOperand(1);
-    Value *Shift = IRB.CreateCall2(I.getCalledValue(),
-                                   IRB.CreateBitCast(S1, V1->getType()), V2);
+    Value *Shift = IRB.CreateCall(I.getCalledValue(),
+                                  {IRB.CreateBitCast(S1, V1->getType()), V2});
     Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
     setShadow(&I, IRB.CreateOr(Shift, S2Conv));
     setOriginForNaryOp(I);
@@ -2185,7 +2228,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     Function *ShadowFn = Intrinsic::getDeclaration(
         F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
 
-    Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
+    Value *S =
+        IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
     if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
     setShadow(&I, S);
     setOriginForNaryOp(I);
@@ -2465,13 +2509,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // Now, get the shadow for the RetVal.
     if (!I.getType()->isSized()) return;
+    // Don't emit the epilogue for musttail call returns.
+    if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
     IRBuilder<> IRBBefore(&I);
     // Until we have full dynamic coverage, make sure the retval shadow is 0.
     Value *Base = getShadowPtrForRetval(&I, IRBBefore);
     IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
-    Instruction *NextInsn = nullptr;
+    BasicBlock::iterator NextInsn;
     if (CS.isCall()) {
-      NextInsn = I.getNextNode();
+      NextInsn = ++I.getIterator();
+      assert(NextInsn != I.getParent()->end());
     } else {
       BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
       if (!NormalDest->getSinglePredecessor()) {
@@ -2483,10 +2530,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
         return;
       }
       NextInsn = NormalDest->getFirstInsertionPt();
-      assert(NextInsn &&
+      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
-    IRBuilder<> IRBAfter(NextInsn);
+    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
@@ -2495,10 +2542,22 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
   }
 
+  bool isAMustTailRetVal(Value *RetVal) {
+    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
+      RetVal = I->getOperand(0);
+    }
+    if (auto *I = dyn_cast<CallInst>(RetVal)) {
+      return I->isMustTailCall();
+    }
+    return false;
+  }
+
   void visitReturnInst(ReturnInst &I) {
     IRBuilder<> IRB(&I);
     Value *RetVal = I.getReturnValue();
     if (!RetVal) return;
+    // Don't emit the epilogue for musttail call returns.
+    if (isAMustTailRetVal(RetVal)) return;
     Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
     if (CheckReturnValue) {
       insertShadowCheck(RetVal, &I);
@@ -2536,9 +2595,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     const DataLayout &DL = F.getParent()->getDataLayout();
     uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
     if (PoisonStack && ClPoisonStackWithCall) {
-      IRB.CreateCall2(MS.MsanPoisonStackFn,
-                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
-                      ConstantInt::get(MS.IntptrTy, Size));
+      IRB.CreateCall(MS.MsanPoisonStackFn,
+                     {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
+                      ConstantInt::get(MS.IntptrTy, Size)});
     } else {
       Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
       Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
@@ -2558,11 +2617,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
           createPrivateNonConstGlobalForString(*F.getParent(),
                                                StackDescription.str());
 
-      IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
-                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
+      IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
+                     {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                       ConstantInt::get(MS.IntptrTy, Size),
                       IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
-                      IRB.CreatePointerCast(&F, MS.IntptrTy));
+                      IRB.CreatePointerCast(&F, MS.IntptrTy)});
     }
   }
 
@@ -2625,6 +2684,31 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     setOrigin(&I, getCleanOrigin());
   }
 
+  void visitCleanupPadInst(CleanupPadInst &I) {
+    setShadow(&I, getCleanShadow(&I));
+    setOrigin(&I, getCleanOrigin());
+  }
+
+  void visitCatchPad(CatchPadInst &I) {
+    setShadow(&I, getCleanShadow(&I));
+    setOrigin(&I, getCleanOrigin());
+  }
+
+  void visitTerminatePad(TerminatePadInst &I) {
+    DEBUG(dbgs() << "TerminatePad: " << I << "\n");
+    // Nothing to do here.
+  }
+
+  void visitCatchEndPadInst(CatchEndPadInst &I) {
+    DEBUG(dbgs() << "CatchEndPad: " << I << "\n");
+    // Nothing to do here.
+  }
+
+  void visitCleanupEndPadInst(CleanupEndPadInst &I) {
+    DEBUG(dbgs() << "CleanupEndPad: " << I << "\n");
+    // Nothing to do here.
+  }
+
   void visitGetElementPtrInst(GetElementPtrInst &I) {
     handleShadowOr(I);
   }
@@ -2668,6 +2752,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // Nothing to do here.
   }
 
+  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
+    DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
+    // Nothing to do here.
+  }
+
+  void visitCatchReturnInst(CatchReturnInst &CRI) {
+    DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
+    // Nothing to do here.
+  }
+
   void visitInstruction(Instruction &I) {
     // Everything else: stop propagating and check for poisoned shadow.
     if (ClDumpStrictInstructions)
@@ -2780,6 +2874,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
   }
 
   void visitVAStartInst(VAStartInst &I) override {
+    if (F.getCallingConv() == CallingConv::X86_64_Win64)
+      return;
     IRBuilder<> IRB(&I);
     VAStartInstrumentationList.push_back(&I);
     Value *VAListTag = I.getArgOperand(0);
@@ -2792,6 +2888,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
   }
 
   void visitVACopyInst(VACopyInst &I) override {
+    if (F.getCallingConv() == CallingConv::X86_64_Win64)
+      return;
     IRBuilder<> IRB(&I);
     Value *VAListTag = I.getArgOperand(0);
     Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
@@ -2843,7 +2941,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
       Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
       Value *OverflowArgAreaShadowPtr =
         MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
-      Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
+      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
+                                             AMD64FpEndOffset);
       IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
     }
   }
@@ -2978,9 +3077,11 @@ VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
   return new VarArgNoOpHelper(Func, Msan, Visitor);
 }
 
-}  // namespace
+}  // anonymous namespace
 
 bool MemorySanitizer::runOnFunction(Function &F) {
+  if (&F == MsanCtorFunction)
+    return false;
   MemorySanitizerVisitor Visitor(F, *this);
 
   // Clear out readonly/readnone attributes.
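
For reference, the mapping this diff switches x86_64 Linux to works as follows: getShadowPtrOffset() computes Offset = (Addr & ~AndMask) ^ XorMask, and the origin pointer is that offset plus OriginBase. A minimal standalone sketch of the arithmetic, assuming the new Linux_X86_64_MemoryMapParams values from the hunk above; the application address used here is hypothetical, and the origin-alignment step is omitted:

#include <cstdint>
#include <cstdio>

int main() {
  // Parameter values from Linux_X86_64_MemoryMapParams in the diff above.
  const uint64_t AndMask    = 0;               // not used by the new mapping
  const uint64_t XorMask    = 0x500000000000;  // shadow = addr ^ XorMask
  const uint64_t OriginBase = 0x100000000000;  // origin = shadow + OriginBase

  uint64_t Addr = 0x700000001000;  // hypothetical application address

  // Offset = (Addr & ~AndMask) ^ XorMask, mirroring getShadowPtrOffset().
  uint64_t Offset = Addr;
  if (AndMask)
    Offset &= ~AndMask;  // skipped here, since AndMask == 0
  Offset ^= XorMask;

  std::printf("shadow = 0x%llx\n", (unsigned long long)Offset);
  std::printf("origin = 0x%llx\n", (unsigned long long)(Offset + OriginBase));
}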