//===-- MemorySanitizer.cpp - detector of uninitialized reads ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, and report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
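///
/// For illustration (an example added here, not part of the original text):
/// with 8 shadow bits per byte, each shadow bit describes the corresponding
/// application bit, so an application byte whose shadow is 0xFF is fully
/// uninitialized, one whose shadow is 0x0F has its low nibble uninitialized,
/// and one whose shadow is 0x00 is fully initialized.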
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for every
/// 4 bytes of application memory. Propagation of origins is basically a bunch
/// of "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
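///
/// For example (illustrative, not from the original text): for c = a + b,
/// origin propagation amounts to
///   origin(c) = select(shadow(b) != 0, origin(b), origin(a))
/// i.e. the origin of the last (rightmost) poisoned argument wins, which is
/// what the Combiner class below implements.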
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely
/// in practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations can not be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
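///
/// A sketch of the resulting instruction order (illustrative, assuming an
/// atomic release store paired with an atomic acquire load of the same
/// location):
///
///   store shadow   ; always clean, ordinary store
///   store app      ; atomic, release
///   ...
///   load app       ; atomic, acquire
///   load shadow    ; ordinary load, sees the clean shadow stored above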
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
///
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "msan"

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"

using namespace llvm;

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
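
// For illustration (example values, not from the original text; they just
// apply the constants above to the getShadowPtr/getOriginPtr formulas used
// later in this file): on x86_64, for an application address Addr,
//   Shadow(Addr) = Addr & ~(1ULL << 46)
//   Origin(Addr) = (Shadow(Addr) + (1ULL << 45)) & ~3ULL
// e.g. Addr = 0x7f0000001234 maps to shadow address 0x3f0000001234 and
// origin address 0x5f0000001234.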

/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<bool> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
       cl::desc("store origin for clean (fully initialized) values"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address operand of a
// load or store. Such bugs are very rare, since a load from a garbage address
// typically results in SEGV, but they still happen (e.g. only the lower bits
// of the address are garbage, or the access happens early at program startup
// where malloc-ed memory is more likely to be zeroed). As of 2012-08-28 this
// flag adds a 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);

// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
// helper tool (MSanDR) to regain control on transition between instrumented
// and non-instrumented code.
static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
       cl::desc("Wrap indirect calls with a given function"),
       cl::Hidden);

namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(bool TrackOrigins = false,
                  StringRef BlacklistFile = StringRef())
      : FunctionPass(ID),
        TrackOrigins(TrackOrigins || ClTrackOrigins),
        BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
        WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
  const char *getPassName() const { return "MemorySanitizer"; }
  bool runOnFunction(Function &F);
  bool doInitialization(Module &M);
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  bool TrackOrigins;

  DataLayout *TD;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  /// \brief Run-time helper that copies origin info for a memory range.
  Value *MsanCopyOriginFn;
  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL.
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  OwningPtr<SpecialCaseList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  bool WrapIndirectCalls;
  /// \brief Run-time wrapper for indirect calls.
  Value *IndirectCallWrapperFn;
  // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
  Type *AnyFunctionPtrTy;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
} // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}

/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  MsanCopyOriginFn = M.getOrInsertFunction(
    "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
    "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
    "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, NULL);

  // Create globals.
  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 8), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
    "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
    0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_va_arg_overflow_size_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);

  if (WrapIndirectCalls) {
    AnyFunctionPtrTy =
      PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
    IndirectCallWrapperFn = M.getOrInsertFunction(
      ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
  }
}

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  TD = getAnalysisIfAvailable<DataLayout>();
  if (!TD)
    return false;
  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
      break;
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(TD);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  bool InsertChecks;
  bool LoadShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;
  OwningPtr<VarArgHelper> VAHelper;

  struct ShadowOriginAndInsertPoint {
    Instruction *Shadow;
    Instruction *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
    ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
        AttributeSet::FunctionIndex, Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    LoadShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
          dbgs() << "MemorySanitizer is not inserting checks into '"
                 << F.getName() << "'\n");
  }

  void materializeStores() {
    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
      StoreInst &I = *dyn_cast<StoreInst>(StoreList[i]);
      IRBuilder<> IRB(&I);

      Value *Val = I.getValueOperand();
      Value *Addr = I.getPointerOperand();
      Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");

      if (ClCheckAccessAddress)
        insertCheck(Addr, &I);

      if (I.isAtomic())
        I.setOrdering(addReleaseOrdering(I.getOrdering()));

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
          IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
                                 Alignment);
        } else {
          Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);

          Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
          // TODO(eugenis): handle non-zero constant shadow by inserting an
          // unconditional check (can not simply fail compilation as this could
          // be in the dead code).
          if (Cst)
            continue;

          Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
              getCleanShadow(ConvertedShadow), "_mscmp");
          Instruction *CheckTerm =
              SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
                                        MS.OriginStoreWeights);
          IRBuilder<> IRBNew(CheckTerm);
          IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
                                    Alignment);
        }
      }
    }
  }

  void materializeChecks() {
    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
      Instruction *Shadow = InstrumentationList[i].Shadow;
      Instruction *OrigIns = InstrumentationList[i].OrigIns;
      IRBuilder<> IRB(OrigIns);
      DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
          getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm =
          SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
                                    /* Unreachable */ !ClKeepGoing,
                                    MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        Instruction *Origin = InstrumentationList[i].Origin;
        IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      CallInst *Call = IRB.CreateCall(MS.WarningFn);
      Call->setDebugLoc(OrigIns->getDebugLoc());
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.TD) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
         DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
      BasicBlock *BB = *DI;
      visit(*BB);
    }

    // Finalize PHI nodes.
    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
      PHINode *PN = ShadowPHINodes[i];
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores();

    // Insert shadow value checks.
    materializeChecks();

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return 0;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
      IRB.CreateAdd(ShadowLong,
                    ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
      IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS + ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return 0;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
           AI != AE; ++AI) {
        if (!AI->getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = AI->hasByValAttr()
          ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
          : MS.TD->getTypeAllocSize(AI->getType());
        if (A == AI) {
          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
          if (AI->hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = AI->getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.TD->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
              getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
              CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG:  " << *AI << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value *OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return 0;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the value is not fully defined.
  void insertCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    if (!InsertChecks) return;
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    InstrumentationList.push_back(
      ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }
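
  // Illustrative note (not in the original text): these helpers strengthen
  // the atomic ordering of the application access, e.g. a Monotonic or
  // Unordered atomic load becomes Acquire, so that the plain shadow load
  // placed after it is ordered against the matching clean shadow store
  // placed before the corresponding release store.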

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (LoadShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we can not
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  // Vector manipulation.

  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.

  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

   public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, false);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, false);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
      IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), false);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }

  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
    // Result is defined if one of the following is true
    // * there is a defined 1 bit in C
    // * C is fully defined
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                    IRB.CreateICmpEQ(
                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
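
  // Worked example (illustrative, not part of the pass): let A = 0b10 with a
  // fully defined shadow Sa = 0b00, and B with Sb = 0b01 and a defined 0 in
  // its high bit. Then C = A ^ B has a defined 1 in its high bit, so
  // (~Sc & C) != 0 and Si evaluates to 0: the comparison result is defined
  // despite B's poisoned low bit, because A and B are known to differ.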

  /// \brief Build the lowest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximise the undefined shadow bit, minimize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }

  /// \brief Build the highest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimise the undefined shadow bit, maximise other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }
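
  // Worked example (illustrative, not part of the pass): for an unsigned
  // 4-bit A = 0b1010 with shadow Sa = 0b0011 (two low bits unknown), the
  // interval of possible values is [A & ~Sa, A | Sa] = [0b1000, 0b1011].
  // handleRelationalComparisonExact() below compares the endpoints of both
  // operands' intervals and marks the result defined iff they agree.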

  /// \brief Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
  /// FIXME: output seems suboptimal when one of the operands is a constant.
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits. Let [b0, b1] be the interval of possible
    // values of B. Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value *op = NULL;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
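
  // For example (illustrative): for x << 3 with a clean shift-amount shadow
  // S2, the result shadow is simply S1 << 3; if any bit of S2 is poisoned,
  // S2Conv sign-extends to all-ones and the entire result becomes poisoned.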

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice: which is bad in case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// Similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemmoveFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemcpyFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemsetFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }

  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  /// \brief Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value* Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
    return true;
  }

  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (LoadShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }

  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }

  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehaviour and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }
  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
        F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

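  // bswap permutes bytes, so its shadow is the same permutation of the
  // operand shadow, e.g. (approximate IR):
  //   %r  = call i32 @llvm.bswap.i32(i32 %x)
  //   %sr = call i32 @llvm.bswap.i32(i32 %sx)   ; shadow of %r
  // The origin is simply inherited from the operand.
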
  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }

  // Replace call to (*Fn) with a call to (*IndirectCallWrapperFn(Fn)).
  void wrapIndirectCall(IRBuilder<> &IRB, CallSite CS) {
    Value *Fn = CS.getCalledValue();
    Value *NewFn = IRB.CreateBitCast(
        IRB.CreateCall(MS.IndirectCallWrapperFn,
                       IRB.CreateBitCast(Fn, MS.AnyFunctionPtrTy)),
        Fn->getType());
    setShadow(NewFn, getShadow(Fn));
    CS.setCalledFunction(NewFn);
  }

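  // E.g. an indirect call "call void %fn()" becomes, roughly:
  //   %wrapper = call @<IndirectCallWrapperFn>(%fn) ; returns replacement fn
  //   call void %wrapper()
  // The wrapper is whatever function MS.IndirectCallWrapperFn was configured
  // to be; the bitcasts to/from MS.AnyFunctionPtrTy are omitted in this
  // sketch.
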
  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark all
      // outputs as clean. Note that any side effects of the inline asm that
      // are not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      // Allow only tail calls with the same types, otherwise
      // we may have a false positive: shadow for a non-void RetVal
      // will get propagated to a void RetVal.
      if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
        Call->setTailCall(false);

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us. To
      // prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }
    IRBuilder<> IRB(&I);

    if (MS.WrapIndirectCalls && !CS.getCalledFunction())
      wrapIndirectCall(IRB, CS);

    unsigned ArgOffset = 0;
    DEBUG(dbgs() << "  CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.TD->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base,
                                 kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal) return;
    Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
    if (CheckReturnValue) {
      insertCheck(RetVal, &I);
      Value *Shadow = getCleanShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
    } else {
      Value *Shadow = getShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      // FIXME: make it conditional if ClStoreCleanOrigin==0
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
    }

    if (PoisonStack && MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and will be
      // replaced by the run-time with the origin id of this allocation site
      // on the first call.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
          createPrivateNonConstGlobalForString(*F.getParent(),
                                               StackDescription.str());

      IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
                      IRB.CreatePointerCast(&F, MS.IntptrTy));
    }
  }

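  // E.g. an alloca named "buf" in function "foo" gets the description string
  // "----buf@foo" (with the mangled name in C++). The function address
  // passed as the last argument lets the run-time symbolize the allocation
  // site in reports.
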
  void visitSelectInst(SelectInst& I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *S = IRB.CreateSelect(I.getCondition(), getShadow(I.getTrueValue()),
                                getShadow(I.getFalseValue()));
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      S = IRB.CreateSelect(getShadow(I.getCondition()),
                           getPoisonedShadow(getShadowTy(I.getType())), S,
                           "_msprop_select_agg");
    } else {
      // Sa = (sext Sb) | (select b, Sc, Sd)
      S = IRB.CreateOr(
          S, IRB.CreateSExt(getShadow(I.getCondition()), S->getType()),
          "_msprop_select");
    }
    setShadow(&I, S);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      Value *Cond = I.getCondition();
      if (Cond->getType()->isVectorTy()) {
        Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
        Cond = IRB.CreateICmpNE(ConvertedShadow,
                                getCleanShadow(ConvertedShadow),
                                "_mso_select");
      }
      setOrigin(&I, IRB.CreateSelect(Cond,
                getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
    }
  }

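  // Concrete sketch for a scalar select (approximate IR):
  //   %a  = select i1 %b, i32 %c, i32 %d
  //   %s  = select i1 %b, i32 %sc, i32 %sd   ; shadow of the chosen operand
  //   %se = sext i1 %sb to i32               ; %sb = shadow of the condition
  //   %sa = or i32 %s, %se                   ; poisoned if %b is poisoned
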
  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "   ResShadow: " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOrigin(&I, getCleanOrigin());
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    DEBUG(dbgs() << "   InsShadow: " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "   Res:       " << *Res << "\n");
    setShadow(&I, Res);
    setOrigin(&I, getCleanOrigin());
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value* arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

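  // E.g. i32 and i8* arguments are AK_GeneralPurpose (passed in GP
  // registers), double and <4 x float> are AK_FloatingPoint (passed in SSE
  // registers); i128 and by-value aggregates fall through to AK_Memory. This
  // deliberately ignores finer points of the real classification algorithm.
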
  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
        GpOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
        FpOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

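  // The resulting layout of the va_arg shadow TLS area mirrors the AMD64
  // register save area: bytes [0, 48) hold shadow for arguments passed in GP
  // registers, [48, 176) for arguments in SSE registers (16 bytes each), and
  // [176, 176 + overflow_size) for arguments passed on the stack, each
  // rounded up to 8 bytes.
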
  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void visitVACopyInst(VACopyInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

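  // For reference, the magic constants above and below (24-byte tag size,
  // offsets 8 and 16) come from the SysV AMD64 va_list layout, roughly:
  //   struct __va_list_tag {
  //     unsigned gp_offset;       // offset 0
  //     unsigned fp_offset;       // offset 4
  //     void *overflow_arg_area;  // offset 8
  //     void *reg_save_area;      // offset 16
  //   };                          // 24 bytes total
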
  void finalizeInstrumentation() {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize,
                       16);
    }
  }
};

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {}

  void visitVAStartInst(VAStartInst &I) {}

  void visitVACopyInst(VACopyInst &I) {}

  void finalizeInstrumentation() {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64. False positives are
  // possible on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

}  // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}