1 //===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file is a part of MemorySanitizer, a detector of uninitialized reads.
13 /// Status: early prototype.
15 /// The algorithm of the tool is similar to Memcheck
16 /// (http://goo.gl/QKbem). We associate a few shadow bits with every
17 /// byte of the application memory, poison the shadow of the malloc-ed
18 /// or alloca-ed memory, load the shadow bits on every memory read,
19 /// propagate the shadow bits through some of the arithmetic
20 /// instructions (including MOV), store the shadow bits on every memory
21 /// write, report a bug on some other instructions (e.g. JMP) if the
22 /// associated shadow is poisoned.
24 /// But there are differences too. The first and the major one:
25 /// compiler instrumentation instead of binary instrumentation. This
26 /// gives us much better register allocation, possible compiler
27 /// optimizations and a fast start-up. But this brings the major issue
28 /// as well: msan needs to see all program events, including system
29 /// calls and reads/writes in system libraries, so we either need to
30 /// compile *everything* with msan or use a binary translation
31 /// component (e.g. DynamoRIO) to instrument pre-built libraries.
32 /// Another difference from Memcheck is that we use 8 shadow bits per
33 /// byte of application memory and use a direct shadow mapping. This
34 /// greatly simplifies the instrumentation code and avoids races on
35 /// shadow updates (Memcheck is single-threaded so races are not a
36 /// concern there. Memcheck uses 2 shadow bits per byte with a slow
37 /// path storage that uses 8 bits per byte).
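///
/// As an illustrative example (not part of the original text): with the
/// default 64-bit parameters defined below (kShadowMask64 == 1ULL << 46),
/// the shadow of an application byte at address Addr lives at
///   ShadowAddr = Addr & ~kShadowMask64;
/// i.e. the mapping is a single AND, with no table lookups.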
39 /// The default value of shadow is 0, which means "clean" (not poisoned).
41 /// Every module initializer should call __msan_init to ensure that the
42 /// shadow memory is ready. On error, __msan_warning is called. Since
43 /// parameters and return values may be passed via registers, we have a
44 /// specialized thread-local shadow for return values
45 /// (__msan_retval_tls) and parameters (__msan_param_tls).
49 /// MemorySanitizer can track origins (allocation points) of all uninitialized
50 /// values. This behavior is controlled with a flag (msan-track-origins) and is
51 /// disabled by default.
53 /// Origins are 4-byte values created and interpreted by the runtime library.
54 /// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
55 /// of application memory. Propagation of origins is basically a bunch of
56 /// "select" instructions that pick the origin of a dirty argument, if an
57 /// instruction has one.
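///
/// For example (a sketch, writing Sx/Ox for the shadow/origin of x): for
/// C = A + B the instrumentation conceptually emits
///   Oc = select(Sb != 0, Ob, Oa)
/// so the origin of the rightmost poisoned argument wins.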
59 /// Every aligned group of 4 consecutive bytes of application memory has one
60 /// origin value associated with it. If these bytes contain uninitialized data
61 /// coming from two different allocations, the last store wins. Because of this,
62 /// MemorySanitizer reports can show unrelated origins, but this is unlikely in practice.
65 /// Origins are meaningless for fully initialized values, so MemorySanitizer
66 /// avoids storing origin to memory when a fully initialized value is stored.
67 /// This way it avoids needlessly overwriting the origin of the 4-byte region on
68 /// a short (i.e. 1-byte) clean store, and it is also good for performance.
69 //===----------------------------------------------------------------------===//
71 #define DEBUG_TYPE "msan"
73 #include "llvm/Transforms/Instrumentation.h"
74 #include "llvm/ADT/DepthFirstIterator.h"
75 #include "llvm/ADT/SmallString.h"
76 #include "llvm/ADT/SmallVector.h"
77 #include "llvm/ADT/Triple.h"
78 #include "llvm/ADT/ValueMap.h"
79 #include "llvm/IR/DataLayout.h"
80 #include "llvm/IR/Function.h"
81 #include "llvm/IR/IRBuilder.h"
82 #include "llvm/IR/InlineAsm.h"
83 #include "llvm/IR/IntrinsicInst.h"
84 #include "llvm/IR/LLVMContext.h"
85 #include "llvm/IR/MDBuilder.h"
86 #include "llvm/IR/Module.h"
87 #include "llvm/IR/Type.h"
88 #include "llvm/InstVisitor.h"
89 #include "llvm/Support/CommandLine.h"
90 #include "llvm/Support/Compiler.h"
91 #include "llvm/Support/Debug.h"
92 #include "llvm/Support/raw_ostream.h"
93 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
94 #include "llvm/Transforms/Utils/Local.h"
95 #include "llvm/Transforms/Utils/ModuleUtils.h"
96 #include "llvm/Transforms/Utils/SpecialCaseList.h"
100 static const uint64_t kShadowMask32 = 1ULL << 31;
101 static const uint64_t kShadowMask64 = 1ULL << 46;
102 static const uint64_t kOriginOffset32 = 1ULL << 30;
103 static const uint64_t kOriginOffset64 = 1ULL << 45;
104 static const unsigned kMinOriginAlignment = 4;
105 static const unsigned kShadowTLSAlignment = 8;
107 /// \brief Track origins of uninitialized values.
109 /// Adds a section to the MemorySanitizer report that points to the allocation
110 /// (stack or heap) the uninitialized bits came from originally.
111 static cl::opt<bool> ClTrackOrigins("msan-track-origins",
112 cl::desc("Track origins (allocation sites) of poisoned memory"),
113 cl::Hidden, cl::init(false));
114 static cl::opt<bool> ClKeepGoing("msan-keep-going",
115 cl::desc("keep going after reporting a UMR"),
116 cl::Hidden, cl::init(false));
117 static cl::opt<bool> ClPoisonStack("msan-poison-stack",
118 cl::desc("poison uninitialized stack variables"),
119 cl::Hidden, cl::init(true));
120 static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
121 cl::desc("poison uninitialized stack variables with a call"),
122 cl::Hidden, cl::init(false));
123 static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
124 cl::desc("poison uninitialized stack variables with the given patter"),
125 cl::Hidden, cl::init(0xff));
126 static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
127 cl::desc("poison undef temps"),
128 cl::Hidden, cl::init(true));
130 static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
131 cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
132 cl::Hidden, cl::init(true));
134 static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
135 cl::desc("exact handling of relational integer ICmp"),
136 cl::Hidden, cl::init(false));
138 static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
139 cl::desc("store origin for clean (fully initialized) values"),
140 cl::Hidden, cl::init(false));
142 // This flag controls whether we check the shadow of the address
143 // operand of load or store. Such bugs are very rare, since a load from
144 // a garbage address typically results in SEGV, but they still happen
145 // (e.g. only the lower bits of the address are garbage, or the access happens
146 // early at program startup where malloc-ed memory is more likely to
147 // be zeroed). As of 2012-08-28 this flag adds a 20% slowdown.
148 static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
149 cl::desc("report accesses through a pointer which has poisoned shadow"),
150 cl::Hidden, cl::init(true));
152 static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
153 cl::desc("print out instructions with default strict semantics"),
154 cl::Hidden, cl::init(false));
156 static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
157 cl::desc("File containing the list of functions where MemorySanitizer "
158 "should not report bugs"), cl::Hidden);
160 // Experimental. Wraps all indirect calls in the instrumented code with
161 // a call to the given function. This is needed to assist the dynamic
162 // helper tool (MSanDR) to regain control on transition between instrumented and
163 // non-instrumented code.
164 static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
165 cl::desc("Wrap indirect calls with a given function"),
170 /// \brief An instrumentation pass implementing detection of uninitialized reads.
173 /// MemorySanitizer: instrument the code in a module to find
174 /// uninitialized reads.
175 class MemorySanitizer : public FunctionPass {
177 MemorySanitizer(bool TrackOrigins = false,
178 StringRef BlacklistFile = StringRef())
180 TrackOrigins(TrackOrigins || ClTrackOrigins),
183 BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
184 WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
185 const char *getPassName() const { return "MemorySanitizer"; }
186 bool runOnFunction(Function &F);
187 bool doInitialization(Module &M);
188 static char ID; // Pass identification, replacement for typeid.
191 void initializeCallbacks(Module &M);
193 /// \brief Track origins (allocation points) of uninitialized values.
200 /// \brief Thread-local shadow storage for function parameters.
201 GlobalVariable *ParamTLS;
202 /// \brief Thread-local origin storage for function parameters.
203 GlobalVariable *ParamOriginTLS;
204 /// \brief Thread-local shadow storage for function return value.
205 GlobalVariable *RetvalTLS;
206 /// \brief Thread-local origin storage for function return value.
207 GlobalVariable *RetvalOriginTLS;
208 /// \brief Thread-local shadow storage for in-register va_arg function
209 /// parameters (x86_64-specific).
210 GlobalVariable *VAArgTLS;
211 /// \brief Thread-local shadow storage for va_arg overflow area
212 /// (x86_64-specific).
213 GlobalVariable *VAArgOverflowSizeTLS;
214 /// \brief Thread-local space used to pass origin value to the UMR reporting function.
216 GlobalVariable *OriginTLS;
218 /// \brief The run-time callback to print a warning.
220 /// \brief Run-time helper that copies origin info for a memory range.
221 Value *MsanCopyOriginFn;
222 /// \brief Run-time helper that generates a new origin value for a stack allocation.
224 Value *MsanSetAllocaOrigin4Fn;
225 /// \brief Run-time helper that poisons stack on function entry.
226 Value *MsanPoisonStackFn;
227 /// \brief MSan runtime replacements for memmove, memcpy and memset.
228 Value *MemmoveFn, *MemcpyFn, *MemsetFn;
230 /// \brief Address mask used in application-to-shadow address calculation.
231 /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
233 /// \brief Offset of the origin shadow from the "normal" shadow.
234 /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
235 uint64_t OriginOffset;
236 /// \brief Branch weights for error reporting.
237 MDNode *ColdCallWeights;
238 /// \brief Branch weights for origin store.
239 MDNode *OriginStoreWeights;
240 /// \brief Path to blacklist file.
241 SmallString<64> BlacklistFile;
242 /// \brief The blacklist.
243 OwningPtr<SpecialCaseList> BL;
244 /// \brief An empty volatile inline asm that prevents callback merge.
247 bool WrapIndirectCalls;
248 /// \brief Run-time wrapper for indirect calls.
249 Value *IndirectCallWrapperFn;
250 // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
251 Type *AnyFunctionPtrTy;
253 friend struct MemorySanitizerVisitor;
254 friend struct VarArgAMD64Helper;
258 char MemorySanitizer::ID = 0;
259 INITIALIZE_PASS(MemorySanitizer, "msan",
260 "MemorySanitizer: detects uninitialized reads.",
263 FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
264 StringRef BlacklistFile) {
265 return new MemorySanitizer(TrackOrigins, BlacklistFile);
268 /// \brief Create a non-const global initialized with the given string.
270 /// Creates a writable global for Str so that we can pass it to the
271 /// run-time lib. The runtime uses the first 4 bytes of the string to store the
272 /// frame ID, so the string needs to be mutable.
273 static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
275 Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
276 return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
277 GlobalValue::PrivateLinkage, StrConst, "");
281 /// \brief Insert extern declaration of runtime-provided functions and globals.
282 void MemorySanitizer::initializeCallbacks(Module &M) {
283 // Only do this once.
288 // Create the callback.
289 // FIXME: this function should have "Cold" calling conv,
290 // which is not yet implemented.
291 StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
292 : "__msan_warning_noreturn";
293 WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);
295 MsanCopyOriginFn = M.getOrInsertFunction(
296 "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
297 IRB.getInt8PtrTy(), IntptrTy, NULL);
298 MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
299 "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
300 IRB.getInt8PtrTy(), IntptrTy, NULL);
301 MsanPoisonStackFn = M.getOrInsertFunction(
302 "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
303 MemmoveFn = M.getOrInsertFunction(
304 "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
305 IRB.getInt8PtrTy(), IntptrTy, NULL);
306 MemcpyFn = M.getOrInsertFunction(
307 "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
309 MemsetFn = M.getOrInsertFunction(
310 "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
314 RetvalTLS = new GlobalVariable(
315 M, ArrayType::get(IRB.getInt64Ty(), 8), false,
316 GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
317 GlobalVariable::InitialExecTLSModel);
318 RetvalOriginTLS = new GlobalVariable(
319 M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
320 "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
322 ParamTLS = new GlobalVariable(
323 M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
324 GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
325 GlobalVariable::InitialExecTLSModel);
326 ParamOriginTLS = new GlobalVariable(
327 M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
328 0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
330 VAArgTLS = new GlobalVariable(
331 M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
332 GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
333 GlobalVariable::InitialExecTLSModel);
334 VAArgOverflowSizeTLS = new GlobalVariable(
335 M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
336 "__msan_va_arg_overflow_size_tls", 0,
337 GlobalVariable::InitialExecTLSModel);
338 OriginTLS = new GlobalVariable(
339 M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
340 "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
342 // We insert an empty inline asm after __msan_report* to avoid callback merge.
343 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
344 StringRef(""), StringRef(""),
345 /*hasSideEffects=*/true);
347 if (WrapIndirectCalls) {
349 PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
350 IndirectCallWrapperFn = M.getOrInsertFunction(
351 ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
355 /// \brief Module-level initialization.
357 /// Inserts a call to __msan_init into the module's constructor list.
358 bool MemorySanitizer::doInitialization(Module &M) {
359 TD = getAnalysisIfAvailable<DataLayout>();
362 BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
363 C = &(M.getContext());
364 unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
367 ShadowMask = kShadowMask64;
368 OriginOffset = kOriginOffset64;
371 ShadowMask = kShadowMask32;
372 OriginOffset = kOriginOffset32;
375 report_fatal_error("unsupported pointer size");
380 IntptrTy = IRB.getIntPtrTy(TD);
381 OriginTy = IRB.getInt32Ty();
383 ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
384 OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);
386 // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
387 appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
388 "__msan_init", IRB.getVoidTy(), NULL)), 0);
391 new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
392 IRB.getInt32(TrackOrigins), "__msan_track_origins");
395 new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
396 IRB.getInt32(ClKeepGoing), "__msan_keep_going");
403 /// \brief A helper class that handles instrumentation of VarArg
404 /// functions on a particular platform.
406 /// Implementations are expected to insert the instrumentation
407 /// necessary to propagate argument shadow through VarArg function
408 /// calls. Visit* methods are called during an InstVisitor pass over
409 /// the function, and should avoid creating new basic blocks. A new
410 /// instance of this class is created for each instrumented function.
411 struct VarArgHelper {
412 /// \brief Visit a CallSite.
413 virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;
415 /// \brief Visit a va_start call.
416 virtual void visitVAStartInst(VAStartInst &I) = 0;
418 /// \brief Visit a va_copy call.
419 virtual void visitVACopyInst(VACopyInst &I) = 0;
421 /// \brief Finalize function instrumentation.
423 /// This method is called after visiting all interesting (see above)
424 /// instructions in a function.
425 virtual void finalizeInstrumentation() = 0;
427 virtual ~VarArgHelper() {}
430 struct MemorySanitizerVisitor;
433 CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
434 MemorySanitizerVisitor &Visitor);
436 /// This class does all the work for a given function. Store and Load
437 /// instructions store and load corresponding shadow and origin
438 /// values. Most instructions propagate shadow from arguments to their
439 /// return values. Certain instructions (most importantly, BranchInst)
440 /// test their argument shadow and print reports (with a runtime call) if it's non-zero.
442 struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
445 SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
446 ValueMap<Value*, Value*> ShadowMap, OriginMap;
451 bool CheckReturnValue;
452 OwningPtr<VarArgHelper> VAHelper;
454 struct ShadowOriginAndInsertPoint {
457 Instruction *OrigIns;
458 ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
459 : Shadow(S), Origin(O), OrigIns(I) { }
460 ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
462 SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
463 SmallVector<Instruction*, 16> StoreList;
465 MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
466 : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
467 bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
468 AttributeSet::FunctionIndex,
469 Attribute::SanitizeMemory);
470 InsertChecks = SanitizeFunction;
471 LoadShadow = SanitizeFunction;
472 PoisonStack = SanitizeFunction && ClPoisonStack;
473 PoisonUndef = SanitizeFunction && ClPoisonUndef;
474 // FIXME: Consider using SpecialCaseList to specify a list of functions that
475 // must always return fully initialized values. For now, we hardcode "main".
476 CheckReturnValue = SanitizeFunction && (F.getName() == "main");
478 DEBUG(if (!InsertChecks)
479 dbgs() << "MemorySanitizer is not inserting checks into '"
480 << F.getName() << "'\n");
483 void materializeStores() {
484 for (size_t i = 0, n = StoreList.size(); i < n; i++) {
485 StoreInst& I = *dyn_cast<StoreInst>(StoreList[i]);
488 Value *Val = I.getValueOperand();
489 Value *Addr = I.getPointerOperand();
490 Value *Shadow = getShadow(Val);
491 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
494 IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
495 DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
498 if (ClCheckAccessAddress)
499 insertCheck(Addr, &I);
501 if (MS.TrackOrigins) {
502 unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
503 if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
504 IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
507 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
509 Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
510 // TODO(eugenis): handle non-zero constant shadow by inserting an
511 // unconditional check (can not simply fail compilation as this could
512 // be in the dead code).
516 Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
517 getCleanShadow(ConvertedShadow), "_mscmp");
518 Instruction *CheckTerm =
519 SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
520 MS.OriginStoreWeights);
521 IRBuilder<> IRBNew(CheckTerm);
522 IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
529 void materializeChecks() {
530 for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
531 Instruction *Shadow = InstrumentationList[i].Shadow;
532 Instruction *OrigIns = InstrumentationList[i].OrigIns;
533 IRBuilder<> IRB(OrigIns);
534 DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
535 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
536 DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
537 Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
538 getCleanShadow(ConvertedShadow), "_mscmp");
539 Instruction *CheckTerm =
540 SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
541 /* Unreachable */ !ClKeepGoing,
544 IRB.SetInsertPoint(CheckTerm);
545 if (MS.TrackOrigins) {
546 Instruction *Origin = InstrumentationList[i].Origin;
547 IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
550 CallInst *Call = IRB.CreateCall(MS.WarningFn);
551 Call->setDebugLoc(OrigIns->getDebugLoc());
552 IRB.CreateCall(MS.EmptyAsm);
553 DEBUG(dbgs() << " CHECK: " << *Cmp << "\n");
555 DEBUG(dbgs() << "DONE:\n" << F);
558 /// \brief Add MemorySanitizer instrumentation to a function.
559 bool runOnFunction() {
560 MS.initializeCallbacks(*F.getParent());
561 if (!MS.TD) return false;
563 // In the presence of unreachable blocks, we may see Phi nodes with
564 // incoming nodes from such blocks. Since InstVisitor skips unreachable
565 // blocks, such nodes will not have any shadow value associated with them.
566 // It's easier to remove unreachable blocks than deal with missing shadow.
567 removeUnreachableBlocks(F);
569 // Iterate all BBs in depth-first order and create shadow instructions
570 // for all instructions (where applicable).
571 // For PHI nodes we create dummy shadow PHIs which will be finalized later.
572 for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
573 DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
574 BasicBlock *BB = *DI;
578 // Finalize PHI nodes.
579 for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
580 PHINode *PN = ShadowPHINodes[i];
581 PHINode *PNS = cast<PHINode>(getShadow(PN));
582 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
583 size_t NumValues = PN->getNumIncomingValues();
584 for (size_t v = 0; v < NumValues; v++) {
585 PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
587 PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
591 VAHelper->finalizeInstrumentation();
593 // Delayed instrumentation of StoreInst.
594 // This may add new checks to be inserted later.
597 // Insert shadow value checks.
603 /// \brief Compute the shadow type that corresponds to a given Value.
604 Type *getShadowTy(Value *V) {
605 return getShadowTy(V->getType());
608 /// \brief Compute the shadow type that corresponds to a given Type.
609 Type *getShadowTy(Type *OrigTy) {
610 if (!OrigTy->isSized()) {
613 // For integer type, shadow is the same as the original type.
614 // This may return weird-sized types like i1.
615 if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
617 if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
618 uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
619 return VectorType::get(IntegerType::get(*MS.C, EltSize),
620 VT->getNumElements());
622 if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
623 SmallVector<Type*, 4> Elements;
624 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
625 Elements.push_back(getShadowTy(ST->getElementType(i)));
626 StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
627 DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
630 uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
631 return IntegerType::get(*MS.C, TypeSize);
634 /// \brief Flatten a vector type.
635 Type *getShadowTyNoVec(Type *ty) {
636 if (VectorType *vt = dyn_cast<VectorType>(ty))
637 return IntegerType::get(*MS.C, vt->getBitWidth());
641 /// \brief Convert a shadow value to its flattened variant.
642 Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
643 Type *Ty = V->getType();
644 Type *NoVecTy = getShadowTyNoVec(Ty);
645 if (Ty == NoVecTy) return V;
646 return IRB.CreateBitCast(V, NoVecTy);
649 /// \brief Compute the shadow address that corresponds to a given application address.
652 /// Shadow = Addr & ~ShadowMask.
653 Value *getShadowPtr(Value *Addr, Type *ShadowTy,
656 IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
657 ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
658 return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
661 /// \brief Compute the origin address that corresponds to a given application address.
664 /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
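///
/// A worked example (illustrative, assuming the default 64-bit constants
/// kShadowMask64 == 1ULL << 46 and kOriginOffset64 == 1ULL << 45):
///   App    = 0x700000000000
///   Shadow = App & ~(1ULL << 46)           = 0x300000000000
///   Origin = (Shadow + (1ULL << 45)) & ~3  = 0x500000000000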
665 Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
667 IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
668 ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
670 IRB.CreateAdd(ShadowLong,
671 ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
673 IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
674 return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
677 /// \brief Compute the shadow address for a given function argument.
679 /// Shadow = ParamTLS+ArgOffset.
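///
/// For instance (a sketch of the layout implied by the ArgOffset bookkeeping
/// below): the shadow of the first argument starts at offset 0 in
/// __msan_param_tls, and each subsequent argument starts at the previous
/// offset plus the argument's store size rounded up to kShadowTLSAlignment
/// (8 bytes).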
680 Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
682 Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
683 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
684 return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
688 /// \brief Compute the origin address for a given function argument.
689 Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
691 if (!MS.TrackOrigins) return 0;
692 Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
693 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
694 return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
698 /// \brief Compute the shadow address for a retval.
699 Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
700 Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
701 return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
705 /// \brief Compute the origin address for a retval.
706 Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
707 // We keep a single origin for the entire retval. Might be too optimistic.
708 return MS.RetvalOriginTLS;
711 /// \brief Set SV to be the shadow value for V.
712 void setShadow(Value *V, Value *SV) {
713 assert(!ShadowMap.count(V) && "Values may only have one shadow");
717 /// \brief Set Origin to be the origin value for V.
718 void setOrigin(Value *V, Value *Origin) {
719 if (!MS.TrackOrigins) return;
720 assert(!OriginMap.count(V) && "Values may only have one origin");
721 DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
722 OriginMap[V] = Origin;
725 /// \brief Create a clean shadow value for a given value.
727 /// Clean shadow (all zeroes) means all bits of the value are defined
729 Constant *getCleanShadow(Value *V) {
730 Type *ShadowTy = getShadowTy(V);
733 return Constant::getNullValue(ShadowTy);
736 /// \brief Create a dirty shadow of a given shadow type.
737 Constant *getPoisonedShadow(Type *ShadowTy) {
739 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
740 return Constant::getAllOnesValue(ShadowTy);
741 StructType *ST = cast<StructType>(ShadowTy);
742 SmallVector<Constant *, 4> Vals;
743 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
744 Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
745 return ConstantStruct::get(ST, Vals);
748 /// \brief Create a dirty shadow for a given value.
749 Constant *getPoisonedShadow(Value *V) {
750 Type *ShadowTy = getShadowTy(V);
753 return getPoisonedShadow(ShadowTy);
756 /// \brief Create a clean (zero) origin.
757 Value *getCleanOrigin() {
758 return Constant::getNullValue(MS.OriginTy);
761 /// \brief Get the shadow value for a given Value.
763 /// This function either returns the value set earlier with setShadow,
764 /// or extracts it from ParamTLS (for function arguments).
765 Value *getShadow(Value *V) {
766 if (Instruction *I = dyn_cast<Instruction>(V)) {
767 // For instructions the shadow is already stored in the map.
768 Value *Shadow = ShadowMap[V];
770 DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
772 assert(Shadow && "No shadow for a value");
776 if (UndefValue *U = dyn_cast<UndefValue>(V)) {
777 Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
778 DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
782 if (Argument *A = dyn_cast<Argument>(V)) {
783 // For arguments we compute the shadow on demand and store it in the map.
784 Value **ShadowPtr = &ShadowMap[V];
787 Function *F = A->getParent();
788 IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
789 unsigned ArgOffset = 0;
790 for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
792 if (!AI->getType()->isSized()) {
793 DEBUG(dbgs() << "Arg is not sized\n");
796 unsigned Size = AI->hasByValAttr()
797 ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
798 : MS.TD->getTypeAllocSize(AI->getType());
800 Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
801 if (AI->hasByValAttr()) {
802 // ByVal pointer itself has clean shadow. We copy the actual
803 // argument shadow to the underlying memory.
804 // Figure out maximal valid memcpy alignment.
805 unsigned ArgAlign = AI->getParamAlignment();
807 Type *EltType = A->getType()->getPointerElementType();
808 ArgAlign = MS.TD->getABITypeAlignment(EltType);
810 unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
811 Value *Cpy = EntryIRB.CreateMemCpy(
812 getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
814 DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
816 *ShadowPtr = getCleanShadow(V);
818 *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
820 DEBUG(dbgs() << " ARG: " << *AI << " ==> " <<
821 **ShadowPtr << "\n");
822 if (MS.TrackOrigins) {
823 Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
824 setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
827 ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
829 assert(*ShadowPtr && "Could not find shadow for an argument");
832 // For everything else the shadow is zero.
833 return getCleanShadow(V);
836 /// \brief Get the shadow for i-th argument of the instruction I.
837 Value *getShadow(Instruction *I, int i) {
838 return getShadow(I->getOperand(i));
841 /// \brief Get the origin for a value.
842 Value *getOrigin(Value *V) {
843 if (!MS.TrackOrigins) return 0;
844 if (isa<Instruction>(V) || isa<Argument>(V)) {
845 Value *Origin = OriginMap[V];
847 DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
848 Origin = getCleanOrigin();
852 return getCleanOrigin();
855 /// \brief Get the origin for i-th argument of the instruction I.
856 Value *getOrigin(Instruction *I, int i) {
857 return getOrigin(I->getOperand(i));
860 /// \brief Remember the place where a shadow check should be inserted.
862 /// This location will be later instrumented with a check that will print a
863 /// UMR warning at runtime if the value is not fully defined.
864 void insertCheck(Value *Val, Instruction *OrigIns) {
866 if (!InsertChecks) return;
867 Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
870 Type *ShadowTy = Shadow->getType();
871 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
872 "Can only insert checks for integer and vector shadow types");
874 Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
875 InstrumentationList.push_back(
876 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
879 // ------------------- Visitors.
881 /// \brief Instrument LoadInst
883 /// Loads the corresponding shadow and (optionally) origin.
884 /// Optionally, checks that the load address is fully defined.
885 void visitLoadInst(LoadInst &I) {
886 assert(I.getType()->isSized() && "Load type must have size");
888 Type *ShadowTy = getShadowTy(&I);
889 Value *Addr = I.getPointerOperand();
891 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
893 IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
895 setShadow(&I, getCleanShadow(&I));
898 if (ClCheckAccessAddress)
899 insertCheck(I.getPointerOperand(), &I);
901 if (MS.TrackOrigins) {
903 unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
905 IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
907 setOrigin(&I, getCleanOrigin());
912 /// \brief Instrument StoreInst
914 /// Stores the corresponding shadow and (optionally) origin.
915 /// Optionally, checks that the store address is fully defined.
916 void visitStoreInst(StoreInst &I) {
917 StoreList.push_back(&I);
920 // Vector manipulation.
921 void visitExtractElementInst(ExtractElementInst &I) {
922 insertCheck(I.getOperand(1), &I);
924 setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
926 setOrigin(&I, getOrigin(&I, 0));
929 void visitInsertElementInst(InsertElementInst &I) {
930 insertCheck(I.getOperand(2), &I);
932 setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
933 I.getOperand(2), "_msprop"));
934 setOriginForNaryOp(I);
937 void visitShuffleVectorInst(ShuffleVectorInst &I) {
938 insertCheck(I.getOperand(2), &I);
940 setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
941 I.getOperand(2), "_msprop"));
942 setOriginForNaryOp(I);
946 void visitSExtInst(SExtInst &I) {
948 setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
949 setOrigin(&I, getOrigin(&I, 0));
952 void visitZExtInst(ZExtInst &I) {
954 setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
955 setOrigin(&I, getOrigin(&I, 0));
958 void visitTruncInst(TruncInst &I) {
960 setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
961 setOrigin(&I, getOrigin(&I, 0));
964 void visitBitCastInst(BitCastInst &I) {
966 setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
967 setOrigin(&I, getOrigin(&I, 0));
970 void visitPtrToIntInst(PtrToIntInst &I) {
972 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
973 "_msprop_ptrtoint"));
974 setOrigin(&I, getOrigin(&I, 0));
977 void visitIntToPtrInst(IntToPtrInst &I) {
979 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
980 "_msprop_inttoptr"));
981 setOrigin(&I, getOrigin(&I, 0));
984 void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
985 void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
986 void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
987 void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
988 void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
989 void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
991 /// \brief Propagate shadow for bitwise AND.
993 /// This code is exact, i.e. if, for example, a bit in the left argument
994 /// is defined and 0, then neither the value nor the definedness of the
995 /// corresponding bit in B affects the resulting shadow.
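///
/// For example (illustrative), if bit i of the left operand is a defined 0
/// (S1[i] == 0, V1[i] == 0), then every term of
/// S = (S1 & S2) | (V1 & S2) | (S1 & V2) is 0 in that bit, so the result bit
/// is reported as defined regardless of the state of the right operand.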
996 void visitAnd(BinaryOperator &I) {
998 // "And" of 0 and a poisoned value results in unpoisoned value.
999 // 1&1 => 1; 0&1 => 0; p&1 => p;
1000 // 1&0 => 0; 0&0 => 0; p&0 => 0;
1001 // 1&p => p; 0&p => 0; p&p => p;
1002 // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
1003 Value *S1 = getShadow(&I, 0);
1004 Value *S2 = getShadow(&I, 1);
1005 Value *V1 = I.getOperand(0);
1006 Value *V2 = I.getOperand(1);
1007 if (V1->getType() != S1->getType()) {
1008 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1009 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1011 Value *S1S2 = IRB.CreateAnd(S1, S2);
1012 Value *V1S2 = IRB.CreateAnd(V1, S2);
1013 Value *S1V2 = IRB.CreateAnd(S1, V2);
1014 setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1015 setOriginForNaryOp(I);
1018 void visitOr(BinaryOperator &I) {
1019 IRBuilder<> IRB(&I);
1020 // "Or" of 1 and a poisoned value results in unpoisoned value.
1021 // 1|1 => 1; 0|1 => 1; p|1 => 1;
1022 // 1|0 => 1; 0|0 => 0; p|0 => p;
1023 // 1|p => 1; 0|p => p; p|p => p;
1024 // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
1025 Value *S1 = getShadow(&I, 0);
1026 Value *S2 = getShadow(&I, 1);
1027 Value *V1 = IRB.CreateNot(I.getOperand(0));
1028 Value *V2 = IRB.CreateNot(I.getOperand(1));
1029 if (V1->getType() != S1->getType()) {
1030 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1031 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1033 Value *S1S2 = IRB.CreateAnd(S1, S2);
1034 Value *V1S2 = IRB.CreateAnd(V1, S2);
1035 Value *S1V2 = IRB.CreateAnd(S1, V2);
1036 setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
1037 setOriginForNaryOp(I);
1040 /// \brief Default propagation of shadow and/or origin.
1042 /// This class implements the general case of shadow propagation, used in all
1043 /// cases where we don't know and/or don't care about what the operation
1044 /// actually does. It converts all input shadow values to a common type
1045 /// (extending or truncating as necessary), and bitwise OR's them.
1047 /// This is much cheaper than inserting checks (i.e. requiring inputs to be
1048 /// fully initialized), and less prone to false positives.
1050 /// This class also implements the general case of origin propagation. For a
1051 /// Nary operation, result origin is set to the origin of an argument that is
1052 /// not entirely initialized. If there is more than one such arguments, the
1053 /// rightmost of them is picked. It does not matter which one is picked if all
1054 /// arguments are initialized.
1055 template <bool CombineShadow>
1060 MemorySanitizerVisitor *MSV;
1063 Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
1064 Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}
1066 /// \brief Add a pair of shadow and origin values to the mix.
1067 Combiner &Add(Value *OpShadow, Value *OpOrigin) {
1068 if (CombineShadow) {
1073 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
1074 Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
1078 if (MSV->MS.TrackOrigins) {
1083 Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
1084 Value *Cond = IRB.CreateICmpNE(FlatShadow,
1085 MSV->getCleanShadow(FlatShadow));
1086 Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
1092 /// \brief Add an application value to the mix.
1093 Combiner &Add(Value *V) {
1094 Value *OpShadow = MSV->getShadow(V);
1095 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
1096 return Add(OpShadow, OpOrigin);
1099 /// \brief Set the current combined values as the given instruction's shadow
1101 void Done(Instruction *I) {
1102 if (CombineShadow) {
1104 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
1105 MSV->setShadow(I, Shadow);
1107 if (MSV->MS.TrackOrigins) {
1109 MSV->setOrigin(I, Origin);
1114 typedef Combiner<true> ShadowAndOriginCombiner;
1115 typedef Combiner<false> OriginCombiner;
1117 /// \brief Propagate origin for arbitrary operation.
1118 void setOriginForNaryOp(Instruction &I) {
1119 if (!MS.TrackOrigins) return;
1120 IRBuilder<> IRB(&I);
1121 OriginCombiner OC(this, IRB);
1122 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1127 size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
1128 assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
1129 "Vector of pointers is not a valid shadow type");
1130 return Ty->isVectorTy() ?
1131 Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
1132 Ty->getPrimitiveSizeInBits();
1135 /// \brief Cast between two shadow types, extending or truncating as
1137 Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
1138 Type *srcTy = V->getType();
1139 if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
1140 return IRB.CreateIntCast(V, dstTy, false);
1141 if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
1142 dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
1143 return IRB.CreateIntCast(V, dstTy, false);
1144 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
1145 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
1146 Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
1148 IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), false);
1149 return IRB.CreateBitCast(V2, dstTy);
1150 // TODO: handle struct types.
1153 /// \brief Propagate shadow for arbitrary operation.
1154 void handleShadowOr(Instruction &I) {
1155 IRBuilder<> IRB(&I);
1156 ShadowAndOriginCombiner SC(this, IRB);
1157 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
1162 void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
1163 void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
1164 void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
1165 void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
1166 void visitSub(BinaryOperator &I) { handleShadowOr(I); }
1167 void visitXor(BinaryOperator &I) { handleShadowOr(I); }
1168 void visitMul(BinaryOperator &I) { handleShadowOr(I); }
1170 void handleDiv(Instruction &I) {
1171 IRBuilder<> IRB(&I);
1172 // Strict on the second argument.
1173 insertCheck(I.getOperand(1), &I);
1174 setShadow(&I, getShadow(&I, 0));
1175 setOrigin(&I, getOrigin(&I, 0));
1178 void visitUDiv(BinaryOperator &I) { handleDiv(I); }
1179 void visitSDiv(BinaryOperator &I) { handleDiv(I); }
1180 void visitFDiv(BinaryOperator &I) { handleDiv(I); }
1181 void visitURem(BinaryOperator &I) { handleDiv(I); }
1182 void visitSRem(BinaryOperator &I) { handleDiv(I); }
1183 void visitFRem(BinaryOperator &I) { handleDiv(I); }
1185 /// \brief Instrument == and != comparisons.
1187 /// Sometimes the comparison result is known even if some of the bits of the
1188 /// arguments are not.
1189 void handleEqualityComparison(ICmpInst &I) {
1190 IRBuilder<> IRB(&I);
1191 Value *A = I.getOperand(0);
1192 Value *B = I.getOperand(1);
1193 Value *Sa = getShadow(A);
1194 Value *Sb = getShadow(B);
1196 // Get rid of pointers and vectors of pointers.
1197 // For ints (and vectors of ints), types of A and Sa match,
1198 // and this is a no-op.
1199 A = IRB.CreatePointerCast(A, Sa->getType());
1200 B = IRB.CreatePointerCast(B, Sb->getType());
1202 // A == B <==> (C = A^B) == 0
1203 // A != B <==> (C = A^B) != 0
1205 Value *C = IRB.CreateXor(A, B);
1206 Value *Sc = IRB.CreateOr(Sa, Sb);
1207 // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
1208 // Result is defined if one of the following is true
1209 // * there is a defined 1 bit in C
1210 // * C is fully defined
1211 // Si = !(C & ~Sc) && Sc
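// Example (illustrative): if A = 0b1??? (top bit a defined 1, rest poisoned)
// and B = 0b0000 (fully defined), then C = A^B has a defined 1 bit, so the
// comparison is known to be "not equal" and the resulting shadow is clean.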
1212 Value *Zero = Constant::getNullValue(Sc->getType());
1213 Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
1215 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
1217 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
1218 Si->setName("_msprop_icmp");
1220 setOriginForNaryOp(I);
1223 /// \brief Build the lowest possible value of V, taking into account V's
1224 /// uninitialized bits.
1225 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1228 // Split shadow into sign bit and other bits.
1229 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1230 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1231 // Maximize the undefined shadow bit, minimize other undefined bits.
1233 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
1235 // Minimize undefined bits.
1236 return IRB.CreateAnd(A, IRB.CreateNot(Sa));
1240 /// \brief Build the highest possible value of V, taking into account V's
1241 /// uninitialized bits.
1242 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1245 // Split shadow into sign bit and other bits.
1246 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1247 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1248 // Minimize the undefined shadow bit, maximize other undefined bits.
1250 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
1252 // Maximize undefined bits.
1253 return IRB.CreateOr(A, Sa);
1257 /// \brief Instrument relational comparisons.
1259 /// This function does exact shadow propagation for all relational
1260 /// comparisons of integers, pointers and vectors of those.
1261 /// FIXME: output seems suboptimal when one of the operands is a constant
1262 void handleRelationalComparisonExact(ICmpInst &I) {
1263 IRBuilder<> IRB(&I);
1264 Value *A = I.getOperand(0);
1265 Value *B = I.getOperand(1);
1266 Value *Sa = getShadow(A);
1267 Value *Sb = getShadow(B);
1269 // Get rid of pointers and vectors of pointers.
1270 // For ints (and vectors of ints), types of A and Sa match,
1271 // and this is a no-op.
1272 A = IRB.CreatePointerCast(A, Sa->getType());
1273 B = IRB.CreatePointerCast(B, Sb->getType());
1275 // Let [a0, a1] be the interval of possible values of A, taking into account
1276 // its undefined bits. Let [b0, b1] be the interval of possible values of B.
1277 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
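// Example (illustrative, unsigned): if A = 0b0?1, so [a0, a1] = [1, 3], and
// B is a fully defined constant 4, then (a0 < b1) and (a1 < b0) are both
// true, so A < B is fully defined even though A is partially poisoned.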
1278 bool IsSigned = I.isSigned();
1279 Value *S1 = IRB.CreateICmp(I.getPredicate(),
1280 getLowestPossibleValue(IRB, A, Sa, IsSigned),
1281 getHighestPossibleValue(IRB, B, Sb, IsSigned));
1282 Value *S2 = IRB.CreateICmp(I.getPredicate(),
1283 getHighestPossibleValue(IRB, A, Sa, IsSigned),
1284 getLowestPossibleValue(IRB, B, Sb, IsSigned));
1285 Value *Si = IRB.CreateXor(S1, S2);
1287 setOriginForNaryOp(I);
1290 /// \brief Instrument signed relational comparisons.
1292 /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
1293 /// propagating the highest bit of the shadow. Everything else is delegated
1294 /// to handleShadowOr().
1295 void handleSignedRelationalComparison(ICmpInst &I) {
1296 Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
1297 Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
1299 CmpInst::Predicate pre = I.getPredicate();
1300 if (constOp0 && constOp0->isNullValue() &&
1301 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
1302 op = I.getOperand(1);
1303 } else if (constOp1 && constOp1->isNullValue() &&
1304 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
1305 op = I.getOperand(0);
1308 IRBuilder<> IRB(&I);
1310 IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
1311 setShadow(&I, Shadow);
1312 setOrigin(&I, getOrigin(op));
1318 void visitICmpInst(ICmpInst &I) {
1319 if (!ClHandleICmp) {
1323 if (I.isEquality()) {
1324 handleEqualityComparison(I);
1328 assert(I.isRelational());
1329 if (ClHandleICmpExact) {
1330 handleRelationalComparisonExact(I);
1334 handleSignedRelationalComparison(I);
1338 assert(I.isUnsigned());
1339 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
1340 handleRelationalComparisonExact(I);
1347 void visitFCmpInst(FCmpInst &I) {
1351 void handleShift(BinaryOperator &I) {
1352 IRBuilder<> IRB(&I);
1353 // If any of the S2 bits are poisoned, the whole thing is poisoned.
1354 // Otherwise perform the same shift on S1.
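// For example (illustrative): for 'x << n' with a fully defined n, S2Conv is
// all zeroes and the result shadow is simply (Sx << n); if any bit of n's
// shadow is set, S2Conv sign-extends to all ones and the whole result is
// marked poisoned.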
1355 Value *S1 = getShadow(&I, 0);
1356 Value *S2 = getShadow(&I, 1);
1357 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
1359 Value *V2 = I.getOperand(1);
1360 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
1361 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1362 setOriginForNaryOp(I);
1365 void visitShl(BinaryOperator &I) { handleShift(I); }
1366 void visitAShr(BinaryOperator &I) { handleShift(I); }
1367 void visitLShr(BinaryOperator &I) { handleShift(I); }
1369 /// \brief Instrument llvm.memmove
1371 /// At this point we don't know if llvm.memmove will be inlined or not.
1372 /// If we don't instrument it and it gets inlined,
1373 /// our interceptor will not kick in and we will lose the memmove.
1374 /// If we instrument the call here, but it does not get inlined,
1375 /// we will memmove the shadow twice, which is bad in the case
1376 /// of overlapping regions. So, we simply lower the intrinsic to a call.
1378 /// Similar situation exists for memcpy and memset.
1379 void visitMemMoveInst(MemMoveInst &I) {
1380 IRBuilder<> IRB(&I);
1383 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1384 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1385 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1386 I.eraseFromParent();
1389 // Similar to memmove: avoid copying shadow twice.
1390 // This is somewhat unfortunate as it may slow down small constant memcpys.
1391 // FIXME: consider doing manual inline for small constant sizes and proper
1393 void visitMemCpyInst(MemCpyInst &I) {
1394 IRBuilder<> IRB(&I);
1397 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1398 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1399 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1400 I.eraseFromParent();
1404 void visitMemSetInst(MemSetInst &I) {
1405 IRBuilder<> IRB(&I);
1408 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1409 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1410 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1411 I.eraseFromParent();
1414 void visitVAStartInst(VAStartInst &I) {
1415 VAHelper->visitVAStartInst(I);
1418 void visitVACopyInst(VACopyInst &I) {
1419 VAHelper->visitVACopyInst(I);
1422 enum IntrinsicKind {
1423 IK_DoesNotAccessMemory,
1428 static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
1429 const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
1430 const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
1431 const int OnlyReadsMemory = IK_OnlyReadsMemory;
1432 const int OnlyAccessesArgumentPointees = IK_WritesMemory;
1433 const int UnknownModRefBehavior = IK_WritesMemory;
1434 #define GET_INTRINSIC_MODREF_BEHAVIOR
1435 #define ModRefBehavior IntrinsicKind
1436 #include "llvm/IR/Intrinsics.gen"
1437 #undef ModRefBehavior
1438 #undef GET_INTRINSIC_MODREF_BEHAVIOR
1441 /// \brief Handle vector store-like intrinsics.
1443 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1444 /// has 1 pointer argument and 1 vector argument, returns void.
1445 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1446 IRBuilder<> IRB(&I);
1447 Value* Addr = I.getArgOperand(0);
1448 Value *Shadow = getShadow(&I, 1);
1449 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1451 // We don't know the pointer alignment (could be unaligned SSE store!).
1452 // Have to assume the worst case.
1453 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1455 if (ClCheckAccessAddress)
1456 insertCheck(Addr, &I);
1458 // FIXME: use ClStoreCleanOrigin
1459 // FIXME: factor out common code from materializeStores
1460 if (MS.TrackOrigins)
1461 IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
1465 /// \brief Handle vector load-like intrinsics.
1467 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
1468 /// has 1 pointer argument, returns a vector.
1469 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
1470 IRBuilder<> IRB(&I);
1471 Value *Addr = I.getArgOperand(0);
1473 Type *ShadowTy = getShadowTy(&I);
1475 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1476 // We don't know the pointer alignment (could be unaligned SSE load!).
1477 // Have to assume the worst case.
1478 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
1480 setShadow(&I, getCleanShadow(&I));
1484 if (ClCheckAccessAddress)
1485 insertCheck(Addr, &I);
1487 if (MS.TrackOrigins) {
1489 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
1491 setOrigin(&I, getCleanOrigin());
1496 /// \brief Handle (SIMD arithmetic)-like intrinsics.
1498 /// Instrument intrinsics with any number of arguments of the same type,
1499 /// equal to the return type. The type should be simple (no aggregates or
1500 /// pointers; vectors are fine).
1501 /// Caller guarantees that this intrinsic does not access memory.
1502 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
1503 Type *RetTy = I.getType();
1504 if (!(RetTy->isIntOrIntVectorTy() ||
1505 RetTy->isFPOrFPVectorTy() ||
1506 RetTy->isX86_MMXTy()))
1509 unsigned NumArgOperands = I.getNumArgOperands();
1511 for (unsigned i = 0; i < NumArgOperands; ++i) {
1512 Type *Ty = I.getArgOperand(i)->getType();
1517 IRBuilder<> IRB(&I);
1518 ShadowAndOriginCombiner SC(this, IRB);
1519 for (unsigned i = 0; i < NumArgOperands; ++i)
1520 SC.Add(I.getArgOperand(i));
1526 /// \brief Heuristically instrument unknown intrinsics.
1528 /// The main purpose of this code is to do something reasonable with all
1529 /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
1530 /// We recognize several classes of intrinsics by their argument types and
1531 /// ModRefBehavior and apply special instrumentation when we are reasonably
1532 /// sure that we know what the intrinsic does.
1534 /// We special-case intrinsics where this approach fails. See llvm.bswap
1535 /// handling as an example of that.
1536 bool handleUnknownIntrinsic(IntrinsicInst &I) {
1537 unsigned NumArgOperands = I.getNumArgOperands();
1538 if (NumArgOperands == 0)
1541 Intrinsic::ID iid = I.getIntrinsicID();
1542 IntrinsicKind IK = getIntrinsicKind(iid);
1543 bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
1544 bool WritesMemory = IK == IK_WritesMemory;
1545 assert(!(OnlyReadsMemory && WritesMemory));
1547 if (NumArgOperands == 2 &&
1548 I.getArgOperand(0)->getType()->isPointerTy() &&
1549 I.getArgOperand(1)->getType()->isVectorTy() &&
1550 I.getType()->isVoidTy() &&
1552 // This looks like a vector store.
1553 return handleVectorStoreIntrinsic(I);
1556 if (NumArgOperands == 1 &&
1557 I.getArgOperand(0)->getType()->isPointerTy() &&
1558 I.getType()->isVectorTy() &&
1560 // This looks like a vector load.
1561 return handleVectorLoadIntrinsic(I);
1564 if (!OnlyReadsMemory && !WritesMemory)
1565 if (maybeHandleSimpleNomemIntrinsic(I))
1568 // FIXME: detect and handle SSE maskstore/maskload
1572 void handleBswap(IntrinsicInst &I) {
1573 IRBuilder<> IRB(&I);
1574 Value *Op = I.getArgOperand(0);
1575 Type *OpType = Op->getType();
1576 Function *BswapFunc = Intrinsic::getDeclaration(
1577 F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
1578 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
1579 setOrigin(&I, getOrigin(Op));
1582 void visitIntrinsicInst(IntrinsicInst &I) {
1583 switch (I.getIntrinsicID()) {
1584 case llvm::Intrinsic::bswap:
1588 if (!handleUnknownIntrinsic(I))
1589 visitInstruction(I);
1594 // Replace call to (*Fn) with a call to (*IndirectCallWrapperFn(Fn)).
1595 void wrapIndirectCall(IRBuilder<> &IRB, CallSite CS) {
1596 Value *Fn = CS.getCalledValue();
1597 Value *NewFn = IRB.CreateBitCast(
1598 IRB.CreateCall(MS.IndirectCallWrapperFn,
1599 IRB.CreateBitCast(Fn, MS.AnyFunctionPtrTy)),
1601 setShadow(NewFn, getShadow(Fn));
1602 CS.setCalledFunction(NewFn);
1605 void visitCallSite(CallSite CS) {
1606 Instruction &I = *CS.getInstruction();
1607 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
1609 CallInst *Call = cast<CallInst>(&I);
1611 // For inline asm, do the usual thing: check argument shadow and mark all
1612 // outputs as clean. Note that any side effects of the inline asm that are
1613 // not immediately visible in its constraints are not handled.
1614 if (Call->isInlineAsm()) {
1615 visitInstruction(I);
1619 // Allow only tail calls with the same types, otherwise
1620 // we may have a false positive: shadow for a non-void RetVal
1621 // will get propagated to a void RetVal.
1622 if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
1623 Call->setTailCall(false);
1625 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
1627 // We are going to insert code that relies on the fact that the callee
1628 // will become a non-readonly function after it is instrumented by us. To
1629 // prevent this code from being optimized out, mark that function
1630 // non-readonly in advance.
1631 if (Function *Func = Call->getCalledFunction()) {
1632 // Clear out readonly/readnone attributes.
1634 B.addAttribute(Attribute::ReadOnly)
1635 .addAttribute(Attribute::ReadNone);
1636 Func->removeAttributes(AttributeSet::FunctionIndex,
1637 AttributeSet::get(Func->getContext(),
1638 AttributeSet::FunctionIndex,
    IRBuilder<> IRB(&I);

    if (MS.WrapIndirectCalls && !CS.getCalledFunction())
      wrapIndirectCall(IRB, CS);

    unsigned ArgOffset = 0;
    DEBUG(dbgs() << "  CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.TD->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }
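
  // The return-value shadow uses the same TLS scheme: the caller (above)
  // pre-stores a clean shadow and reloads the slot right after the call, while
  // the callee side (visitReturnInst below) overwrites the slot with the
  // shadow of the value actually returned. If the callee is not instrumented,
  // the pre-stored clean shadow is what the caller sees, trading missed
  // reports for the absence of spurious ones.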
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal) return;
    Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
    if (CheckReturnValue) {
      insertCheck(RetVal, &I);
      Value *Shadow = getCleanShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
    } else {
      Value *Shadow = getShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      // FIXME: make it conditional if ClStoreCleanOrigin==0
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }
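
  // PHI shadows cannot be computed on the spot because shadows of the
  // incoming values may not exist yet; we create empty placeholder PHIs here,
  // remember them in ShadowPHINodes, and fill in their incoming values once
  // the whole function has been visited.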
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }
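
  // Stack allocations: the shadow of a fresh alloca is poisoned either by a
  // call into the runtime (ClPoisonStackWithCall) or by an inline memset of
  // the poison pattern over its shadow. With origin tracking, a
  // "<name>@<function>" description string is also registered with the
  // runtime so reports can name the offending stack variable.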
  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
    }

    if (PoisonStack && MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and will be replaced
      // by the stack origin id at the first call.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
          createPrivateNonConstGlobalForString(*F.getParent(),
                                               StackDescription.str());

      IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
                      IRB.CreatePointerCast(&F, MS.IntptrTy));
    }
  }
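
  // Shadow for select: the result is poisoned if the chosen operand is
  // poisoned or if the condition itself is poisoned. Illustrative IR for the
  // scalar case (value names assumed):
  //   %a  = select i1 %b, i32 %c, i32 %d
  //   %s0 = select i1 %b, i32 %Sc, i32 %Sd
  //   %s1 = sext i1 %Sb to i32
  //   %Sa = or i32 %s0, %s1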
  void visitSelectInst(SelectInst& I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *S = IRB.CreateSelect(I.getCondition(), getShadow(I.getTrueValue()),
                                getShadow(I.getFalseValue()));
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      S = IRB.CreateSelect(getShadow(I.getCondition()),
                           getPoisonedShadow(getShadowTy(I.getType())), S,
                           "_msprop_select_agg");
    } else {
      // Sa = (sext Sb) | (select b, Sc, Sd)
      S = IRB.CreateOr(
          S, IRB.CreateSExt(getShadow(I.getCondition()), S->getType()),
          "_msprop_select");
    }
    setShadow(&I, S);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      Value *Cond = I.getCondition();
      if (Cond->getType()->isVectorTy()) {
        Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
        Cond = IRB.CreateICmpNE(ConvertedShadow,
                                getCleanShadow(ConvertedShadow), "_mso_select");
      }
      setOrigin(&I, IRB.CreateSelect(Cond,
                getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
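
  // GEPs, extractvalue and insertvalue need no memory-access instrumentation:
  // a GEP's shadow is just the combination of its operands' shadows
  // (handleShadowOr), while extractvalue/insertvalue mirror the same operation
  // on the shadow aggregate and leave the origin clean, since origins of
  // aggregate values are not tracked.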
  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "   ResShadow: " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOrigin(&I, getCleanOrigin());
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    DEBUG(dbgs() << "   InsShadow: " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "   Res: " << *Res << "\n");
    setShadow(&I, Res);
    setOrigin(&I, getCleanOrigin());
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
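
// Shadow for variadic arguments is passed through a dedicated va-arg TLS
// buffer (MS.VAArgTLS) whose layout mirrors the AMD64 System V register save
// area (the offsets below match the AMD64GpEndOffset/AMD64FpEndOffset
// constants):
//   [0, 48)    arguments passed in general-purpose registers (6 regs x 8),
//   [48, 176)  arguments passed in SSE registers (8 regs x 16),
//   [176, ...) arguments passed in memory; the size of this tail is
//              communicated separately via MS.VAArgOverflowSizeTLS.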
/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value* arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
        GpOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
        FpOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
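
  // For example (illustrative), for a call like printf(fmt, i, d) where fmt is
  // a pointer, i an i32 and d a double, the shadows are stored at va-arg TLS
  // offsets 0 (fmt, GP), 8 (i, GP) and 48 (d, FP), and the recorded overflow
  // size is 0 since nothing spills to memory.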
  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void visitVACopyInst(VACopyInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }
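
  // For reference, the x86_64 System V __va_list_tag that the magic constants
  // above and below refer to looks roughly like this:
  //   struct __va_list_tag {
  //     i32 gp_offset;           // offset 0
  //     i32 fp_offset;           // offset 4
  //     i8 *overflow_arg_area;   // offset 8
  //     i8 *reg_save_area;       // offset 16
  //   };                         // 24 bytes total
  // hence the 24-byte unpoisoning above and the loads at offsets 8 and 16 in
  // finalizeInstrumentation below.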
  void finalizeInstrumentation() {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
    }
  }
};

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {}

  void visitVAStartInst(VAStartInst &I) {}

  void visitVACopyInst(VACopyInst &I) {}

  void finalizeInstrumentation() {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64. False positives are possible
  // on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

}  // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}