cl::desc("poison uninitialized stack variables with a call"),
cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
- cl::desc("poison uninitialized stack variables with the given patter"),
+ cl::desc("poison uninitialized stack variables with the given pattern"),
cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
cl::desc("poison undef temps"),
cl::desc("Insert checks for constant shadow values"),
cl::Hidden, cl::init(false));
+static const char *const kMsanModuleCtorName = "msan.module_ctor";
+static const char *const kMsanInitName = "__msan_init";
+
namespace {
// Memory map parameters used in application-to-shadow address calculation.
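+// The shadow offset of an application address Addr is computed as
+// (Addr & ~AndMask) ^ XorMask (see getShadowPtrOffset() below); the shadow
+// itself lives at that offset plus ShadowBase, and the origin at that offset
+// plus OriginBase.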
// x86_64 Linux
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
+#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
0x400000000000, // AndMask
0, // XorMask (not used)
0, // ShadowBase (not used)
0x200000000000, // OriginBase
+#else
+ 0, // AndMask (not used)
+ 0x500000000000, // XorMask
+ 0, // ShadowBase (not used)
+ 0x100000000000, // OriginBase
+#endif
};
// mips64 Linux
0x002000000000, // OriginBase
};
+// ppc64 Linux
+static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
+ 0x200000000000, // AndMask
+ 0x100000000000, // XorMask
+ 0x080000000000, // ShadowBase
+ 0x1C0000000000, // OriginBase
+};
+
+// aarch64 Linux
+static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
+ 0, // AndMask (not used)
+ 0x06000000000, // XorMask
+ 0, // ShadowBase (not used)
+ 0x01000000000, // OriginBase
+};
+
// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
0x000180000000, // AndMask
};
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
- NULL,
+ nullptr,
&Linux_MIPS64_MemoryMapParams,
};
+static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
+ nullptr,
+ &Linux_PowerPC64_MemoryMapParams,
+};
+
+static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
+ nullptr,
+ &Linux_AArch64_MemoryMapParams,
+};
+
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
&FreeBSD_I386_MemoryMapParams,
&FreeBSD_X86_64_MemoryMapParams,
MDNode *OriginStoreWeights;
/// \brief An empty volatile inline asm that prevents callback merge.
InlineAsm *EmptyAsm;
+ Function *MsanCtorFunction;
friend struct MemorySanitizerVisitor;
friend struct VarArgAMD64Helper;
friend struct VarArgMIPS64Helper;
};
-} // namespace
+} // anonymous namespace
char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
GlobalValue::PrivateLinkage, StrConst, "");
}
-
/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
// Only do this once.
case Triple::mips64el:
MapParams = Linux_MIPS_MemoryMapParams.bits64;
break;
+ case Triple::ppc64:
+ case Triple::ppc64le:
+ MapParams = Linux_PowerPC_MemoryMapParams.bits64;
+ break;
+ case Triple::aarch64:
+ case Triple::aarch64_be:
+ MapParams = Linux_ARM_MemoryMapParams.bits64;
+ break;
default:
report_fatal_error("unsupported architecture");
}
ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
- // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
- appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
- "__msan_init", IRB.getVoidTy(), nullptr)), 0);
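+  // Create the module constructor (msan.module_ctor), whose body is a single
+  // call to __msan_init(), and register it in llvm.global_ctors.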
+ std::tie(MsanCtorFunction, std::ignore) =
+ createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
+ /*InitArgTypes=*/{},
+ /*InitArgs=*/{});
+
+ appendToGlobalCtors(M, MsanCtorFunction, 0);
if (TrackOrigins)
new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
Value *IntptrOriginPtr =
IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
for (unsigned i = 0; i < Size / IntptrSize; ++i) {
- Value *Ptr =
- i ? IRB.CreateConstGEP1_32(IntptrOriginPtr, i) : IntptrOriginPtr;
+ Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
+ : IntptrOriginPtr;
IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
Ofs += IntptrSize / kOriginSize;
CurrentAlignment = IntptrAlignment;
}
for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
- Value *GEP = i ? IRB.CreateConstGEP1_32(OriginPtr, i) : OriginPtr;
+ Value *GEP =
+ i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
CurrentAlignment = kMinOriginAlignment;
}
Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
Value *ConvertedShadow2 = IRB.CreateZExt(
ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
- IRB.CreateCall3(Fn, ConvertedShadow2,
- IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
- Origin);
+ IRB.CreateCall(Fn, {ConvertedShadow2,
+ IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
+ Origin});
} else {
Value *Cmp = IRB.CreateICmpNE(
ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
Instruction *CheckTerm = SplitBlockAndInsertIfThen(
- Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
+ Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
IRBuilder<> IRBNew(CheckTerm);
paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
MS.OriginTLS);
}
- IRB.CreateCall(MS.WarningFn);
- IRB.CreateCall(MS.EmptyAsm);
+ IRB.CreateCall(MS.WarningFn, {});
+ IRB.CreateCall(MS.EmptyAsm, {});
// FIXME: Insert UnreachableInst if !ClKeepGoing?
// This may invalidate some of the following checks and needs to be done
// at the very end.
Value *Fn = MS.MaybeWarningFn[SizeIndex];
Value *ConvertedShadow2 =
IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
- IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
+ IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
? Origin
- : (Value *)IRB.getInt32(0));
+ : (Value *)IRB.getInt32(0)});
} else {
Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
getCleanShadow(ConvertedShadow), "_mscmp");
IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
MS.OriginTLS);
}
- IRB.CreateCall(MS.WarningFn);
- IRB.CreateCall(MS.EmptyAsm);
+ IRB.CreateCall(MS.WarningFn, {});
+ IRB.CreateCall(MS.EmptyAsm, {});
DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
}
}
///
/// Offset = (Addr & ~AndMask) ^ XorMask
Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
+ Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);
+
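+    // Either mask may be zero for a given platform mapping (the XOR-only
+    // x86_64 and aarch64 mappings leave AndMask unset), so each step is
+    // applied only when its mask is nonzero.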
uint64_t AndMask = MS.MapParams->AndMask;
- assert(AndMask != 0 && "AndMask shall be specified");
- Value *OffsetLong =
- IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
- ConstantInt::get(MS.IntptrTy, ~AndMask));
+ if (AndMask)
+ OffsetLong =
+ IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));
uint64_t XorMask = MS.MapParams->XorMask;
- if (XorMask != 0)
- OffsetLong = IRB.CreateXor(OffsetLong,
- ConstantInt::get(MS.IntptrTy, XorMask));
+ if (XorMask)
+ OffsetLong =
+ IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
return OffsetLong;
}
}
void visitBitCastInst(BitCastInst &I) {
+ // Special case: if this is the bitcast (there is exactly 1 allowed) between
+ // a musttail call and a ret, don't instrument. New instructions are not
+ // allowed after a musttail call.
+ if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
+ if (CI->isMustTailCall())
+ return;
IRBuilder<> IRB(&I);
setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
setOrigin(&I, getOrigin(&I, 0));
Type *EltTy = Ty->getSequentialElementType();
SmallVector<Constant *, 16> Elements;
for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
- ConstantInt *Elt =
- dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx));
- APInt V = Elt->getValue();
- APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
- Elements.push_back(ConstantInt::get(EltTy, V2));
+ if (ConstantInt *Elt =
+ dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
+ APInt V = Elt->getValue();
+ APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
+ Elements.push_back(ConstantInt::get(EltTy, V2));
+ } else {
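+          // Conservative fallback for elements that are not ConstantInt
+          // (e.g. undef): a multiplier of 1 propagates this lane's shadow
+          // unchanged.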
+ Elements.push_back(ConstantInt::get(EltTy, 1));
+ }
}
ShadowMul = ConstantVector::get(Elements);
} else {
- ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg);
- APInt V = Elt->getValue();
- APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
- ShadowMul = ConstantInt::get(Elt->getType(), V2);
+ if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
+ APInt V = Elt->getValue();
+ APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
+ ShadowMul = ConstantInt::get(Ty, V2);
+ } else {
+ ShadowMul = ConstantInt::get(Ty, 1);
+ }
}
IRBuilder<> IRB(&I);
/// \brief Instrument signed relational comparisons.
///
- /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
- /// propagating the highest bit of the shadow. Everything else is delegated
- /// to handleShadowOr().
+ /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
+ /// bit of the shadow. Everything else is delegated to handleShadowOr().
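+  ///
+  /// For example, the result of (x < 0) depends only on the sign bit of x,
+  /// so its shadow is computed as (shadow(x) s< 0): the result is poisoned
+  /// exactly when the sign bit of x is poisoned.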
void handleSignedRelationalComparison(ICmpInst &I) {
- Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
- Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
- Value* op = nullptr;
- CmpInst::Predicate pre = I.getPredicate();
- if (constOp0 && constOp0->isNullValue() &&
- (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
- op = I.getOperand(1);
- } else if (constOp1 && constOp1->isNullValue() &&
- (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
+ Constant *constOp;
+ Value *op = nullptr;
+ CmpInst::Predicate pre;
+ if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
op = I.getOperand(0);
+ pre = I.getPredicate();
+ } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
+ op = I.getOperand(1);
+ pre = I.getSwappedPredicate();
+ } else {
+ handleShadowOr(I);
+ return;
}
- if (op) {
+
+ if ((constOp->isNullValue() &&
+ (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
+ (constOp->isAllOnesValue() &&
+ (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
IRBuilder<> IRB(&I);
- Value* Shadow =
- IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
+ Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
+ "_msprop_icmp_s");
setShadow(&I, Shadow);
setOrigin(&I, getOrigin(op));
} else {
/// Similar situation exists for memcpy and memset.
void visitMemMoveInst(MemMoveInst &I) {
IRBuilder<> IRB(&I);
- IRB.CreateCall3(
- MS.MemmoveFn,
- IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ IRB.CreateCall(
+ MS.MemmoveFn,
+ {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
// alignment.
void visitMemCpyInst(MemCpyInst &I) {
IRBuilder<> IRB(&I);
- IRB.CreateCall3(
- MS.MemcpyFn,
- IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ IRB.CreateCall(
+ MS.MemcpyFn,
+ {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
// Same as memcpy.
void visitMemSetInst(MemSetInst &I) {
IRBuilder<> IRB(&I);
- IRB.CreateCall3(
- MS.MemsetFn,
- IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
- IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
- IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
+ IRB.CreateCall(
+ MS.MemsetFn,
+ {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
+ IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
+ IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
I.eraseFromParent();
}
VAHelper->visitVACopyInst(I);
}
- enum IntrinsicKind {
- IK_DoesNotAccessMemory,
- IK_OnlyReadsMemory,
- IK_WritesMemory
- };
-
- static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
- const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
- const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
- const int OnlyReadsMemory = IK_OnlyReadsMemory;
- const int OnlyAccessesArgumentPointees = IK_WritesMemory;
- const int UnknownModRefBehavior = IK_WritesMemory;
-#define GET_INTRINSIC_MODREF_BEHAVIOR
-#define ModRefBehavior IntrinsicKind
-#include "llvm/IR/Intrinsics.gen"
-#undef ModRefBehavior
-#undef GET_INTRINSIC_MODREF_BEHAVIOR
- }
-
/// \brief Handle vector store-like intrinsics.
///
/// Instrument intrinsics that look like a simple SIMD store: writes memory,
/// has 1 pointer argument and 1 vector argument, returns void.
if (NumArgOperands == 0)
return false;
- Intrinsic::ID iid = I.getIntrinsicID();
- IntrinsicKind IK = getIntrinsicKind(iid);
- bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
- bool WritesMemory = IK == IK_WritesMemory;
- assert(!(OnlyReadsMemory && WritesMemory));
-
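+    // Heuristic: a call with a void result, one pointer operand and one
+    // vector operand that is not marked as only reading memory is treated as
+    // a vector store. Illustrative shape (not a real intrinsic):
+    //   void @llvm.some.store(i8* %p, <4 x i32> %v)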
if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
I.getArgOperand(1)->getType()->isVectorTy() &&
I.getType()->isVoidTy() &&
- WritesMemory) {
+ !I.onlyReadsMemory()) {
// This looks like a vector store.
return handleVectorStoreIntrinsic(I);
}
if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
I.getType()->isVectorTy() &&
- OnlyReadsMemory) {
+ I.onlyReadsMemory()) {
// This looks like a vector load.
return handleVectorLoadIntrinsic(I);
}
- if (!OnlyReadsMemory && !WritesMemory)
+ if (I.doesNotAccessMemory())
if (maybeHandleSimpleNomemIntrinsic(I))
return true;
Value *CopyOp, *ConvertOp;
switch (I.getNumArgOperands()) {
+ case 3:
+ assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
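+      // The rounding-mode operand is a constant, so it carries a clean
+      // shadow; fall through and handle the remaining operands as in the
+      // two-argument case.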
case 2:
CopyOp = I.getArgOperand(0);
ConvertOp = I.getArgOperand(1);
: Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
Value *V1 = I.getOperand(0);
Value *V2 = I.getOperand(1);
- Value *Shift = IRB.CreateCall2(I.getCalledValue(),
- IRB.CreateBitCast(S1, V1->getType()), V2);
+ Value *Shift = IRB.CreateCall(I.getCalledValue(),
+ {IRB.CreateBitCast(S1, V1->getType()), V2});
Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
setShadow(&I, IRB.CreateOr(Shift, S2Conv));
setOriginForNaryOp(I);
Function *ShadowFn = Intrinsic::getDeclaration(
F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
- Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
+ Value *S =
+ IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
setShadow(&I, S);
setOriginForNaryOp(I);
// Now, get the shadow for the RetVal.
if (!I.getType()->isSized()) return;
+ // Don't emit the epilogue for musttail call returns.
+ if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
IRBuilder<> IRBBefore(&I);
// Until we have full dynamic coverage, make sure the retval shadow is 0.
Value *Base = getShadowPtrForRetval(&I, IRBBefore);
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
- Instruction *NextInsn = nullptr;
+ BasicBlock::iterator NextInsn;
if (CS.isCall()) {
- NextInsn = I.getNextNode();
+ NextInsn = ++I.getIterator();
+ assert(NextInsn != I.getParent()->end());
} else {
BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
if (!NormalDest->getSinglePredecessor()) {
return;
}
NextInsn = NormalDest->getFirstInsertionPt();
- assert(NextInsn &&
+ assert(NextInsn != NormalDest->end() &&
"Could not find insertion point for retval shadow load");
}
- IRBuilder<> IRBAfter(NextInsn);
+ IRBuilder<> IRBAfter(&*NextInsn);
Value *RetvalShadow =
IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
kShadowTLSAlignment, "_msret");
setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
}
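+  // Check whether RetVal is (possibly via a single bitcast) the result of a
+  // musttail call, in which case no instructions may be inserted between the
+  // call and the ret.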
+ bool isAMustTailRetVal(Value *RetVal) {
+ if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
+ RetVal = I->getOperand(0);
+ }
+ if (auto *I = dyn_cast<CallInst>(RetVal)) {
+ return I->isMustTailCall();
+ }
+ return false;
+ }
+
void visitReturnInst(ReturnInst &I) {
IRBuilder<> IRB(&I);
Value *RetVal = I.getReturnValue();
if (!RetVal) return;
+ // Don't emit the epilogue for musttail call returns.
+ if (isAMustTailRetVal(RetVal)) return;
Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
if (CheckReturnValue) {
insertShadowCheck(RetVal, &I);
const DataLayout &DL = F.getParent()->getDataLayout();
uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
if (PoisonStack && ClPoisonStackWithCall) {
- IRB.CreateCall2(MS.MsanPoisonStackFn,
- IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
- ConstantInt::get(MS.IntptrTy, Size));
+ IRB.CreateCall(MS.MsanPoisonStackFn,
+ {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
+ ConstantInt::get(MS.IntptrTy, Size)});
} else {
Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
createPrivateNonConstGlobalForString(*F.getParent(),
StackDescription.str());
- IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
- IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
+ IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
+ {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
ConstantInt::get(MS.IntptrTy, Size),
IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
- IRB.CreatePointerCast(&F, MS.IntptrTy));
+ IRB.CreatePointerCast(&F, MS.IntptrTy)});
}
}
setOrigin(&I, getCleanOrigin());
}
+ void visitCleanupPadInst(CleanupPadInst &I) {
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ }
+
+ void visitCatchPad(CatchPadInst &I) {
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ }
+
+ void visitTerminatePad(TerminatePadInst &I) {
+ DEBUG(dbgs() << "TerminatePad: " << I << "\n");
+ // Nothing to do here.
+ }
+
+ void visitCatchEndPadInst(CatchEndPadInst &I) {
+ DEBUG(dbgs() << "CatchEndPad: " << I << "\n");
+ // Nothing to do here.
+ }
+
+ void visitCleanupEndPadInst(CleanupEndPadInst &I) {
+ DEBUG(dbgs() << "CleanupEndPad: " << I << "\n");
+ // Nothing to do here.
+ }
+
void visitGetElementPtrInst(GetElementPtrInst &I) {
handleShadowOr(I);
}
// Nothing to do here.
}
+ void visitCleanupReturnInst(CleanupReturnInst &CRI) {
+ DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
+ // Nothing to do here.
+ }
+
+ void visitCatchReturnInst(CatchReturnInst &CRI) {
+ DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
+ // Nothing to do here.
+ }
+
void visitInstruction(Instruction &I) {
// Everything else: stop propagating and check for poisoned shadow.
if (ClDumpStrictInstructions)
}
void visitVAStartInst(VAStartInst &I) override {
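+    // Win64 varargs use a different calling convention and va_list layout
+    // than the SysV AMD64 scheme modeled by this helper, so skip them.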
+ if (F.getCallingConv() == CallingConv::X86_64_Win64)
+ return;
IRBuilder<> IRB(&I);
VAStartInstrumentationList.push_back(&I);
Value *VAListTag = I.getArgOperand(0);
}
void visitVACopyInst(VACopyInst &I) override {
+ if (F.getCallingConv() == CallingConv::X86_64_Win64)
+ return;
IRBuilder<> IRB(&I);
Value *VAListTag = I.getArgOperand(0);
Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
Value *OverflowArgAreaShadowPtr =
MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
- Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
+ Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
+ AMD64FpEndOffset);
IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
}
}
return new VarArgNoOpHelper(Func, Msan, Visitor);
}
-} // namespace
+} // anonymous namespace
bool MemorySanitizer::runOnFunction(Function &F) {
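+  // Never instrument the MSan module constructor itself.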
+ if (&F == MsanCtorFunction)
+ return false;
MemorySanitizerVisitor Visitor(F, *this);
// Clear out readonly/readnone attributes.