//===-- MemorySanitizer.cpp - detector of uninitialized reads ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
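///
/// For illustration (a conceptual sketch, not the exact IR this pass
/// emits), a plain byte copy
///
///   %v = load i8* %p
///   store i8 %v, i8* %q
///
/// conceptually becomes
///
///   %vs = load i8* shadow(%p)     ; load the shadow of *p
///   %v  = load i8* %p
///   store i8 %vs, i8* shadow(%q)  ; store it to the shadow of *q
///   store i8 %v, i8* %q
///
/// where shadow() stands for the address transformation described below.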
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
///                           Origin tracking:
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
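///
/// For example, for C = A + B the origin of C is conceptually
///
///   OriginC = select (ShadowB != 0), OriginB, OriginA
///
/// (a sketch; the actual IR is built by the Combiner class below).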
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely
/// in practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needless overwriting of the origin of the 4-byte region
/// on a short (i.e. 1-byte) clean store, and it is also good for performance.
///
///                            Atomic handling:
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations can not be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
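///
/// A minimal sketch of the resulting order, for a release store in one
/// thread and an acquire load of the same location in another:
///
///   store clean shadow for %p      ; shadow store placed before app store
///   store atomic %v, %p release
///   ...
///   %v = load atomic %p acquire
///   %vs = load shadow for %p       ; shadow load placed after app load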
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. Current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "msan"

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"

using namespace llvm;

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
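
// A worked example of the 64-bit mapping (a sketch using the constants
// above): for an application address App, Shadow = App & ~kShadowMask64
// simply clears bit 46, and Origin = (Shadow + kOriginOffset64) & ~3ULL
// lands in a third region, rounded down to 4-byte alignment.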

/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but still happen
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);

// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
// helper tool (MSanDR) to regain control on transition between instrumented
// and non-instrumented code.
static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
       cl::desc("Wrap indirect calls with a given function"),
       cl::Hidden);

static cl::opt<bool> ClWrapIndirectCallsFast("msan-wrap-indirect-calls-fast",
       cl::desc("Do not wrap indirect calls with target in the same module"),
       cl::Hidden, cl::init(true));

namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0,
                  StringRef BlacklistFile = StringRef())
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        DL(0),
        WarningFn(0),
        BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
        WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  const DataLayout *DL;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for the va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  GlobalVariable *MsandrModuleStart;
  GlobalVariable *MsandrModuleEnd;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL.
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  std::unique_ptr<SpecialCaseList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  bool WrapIndirectCalls;
  /// \brief Run-time wrapper for indirect calls.
  Value *IndirectCallWrapperFn;
  // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
  Type *AnyFunctionPtrTy;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
}  // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}

/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
    "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
    "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanChainOriginFn = M.getOrInsertFunction(
    "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), NULL);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, NULL);

  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 8), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
    "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
    0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_va_arg_overflow_size_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);

  if (WrapIndirectCalls) {
    AnyFunctionPtrTy =
      PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
    IndirectCallWrapperFn = M.getOrInsertFunction(
        ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
  }

  if (ClWrapIndirectCallsFast) {
    MsandrModuleStart = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        0, "__executable_start");
    MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
    MsandrModuleEnd = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        0, "_end");
    MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
  }
}

/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    return false;
  DL = &DLP->getDataLayout();

  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
      break;
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool LoadShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;
  SmallVector<CallSite, 16> IndirectCallList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
                                                   AttributeSet::FunctionIndex,
                                                   Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    LoadShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
            dbgs() << "MemorySanitizer is not inserting checks into '"
                   << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  void materializeStores() {
    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
      StoreInst& I = *dyn_cast<StoreInst>(StoreList[i]);

      IRBuilder<> IRB(&I);
      Value *Val = I.getValueOperand();
      Value *Addr = I.getPointerOperand();
      Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
        IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress)
        insertShadowCheck(Addr, &I);

      if (I.isAtomic())
        I.setOrdering(addReleaseOrdering(I.getOrdering()));

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        if (isa<StructType>(Shadow->getType())) {
          IRB.CreateAlignedStore(updateOrigin(getOrigin(Val), IRB),
                                 getOriginPtr(Addr, IRB), Alignment);
        } else {
          Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);

          // TODO(eugenis): handle non-zero constant shadow by inserting an
          // unconditional check (can not simply fail compilation as this could
          // be in the dead code).
          if (isa<Constant>(ConvertedShadow))
            continue;

          Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
              getCleanShadow(ConvertedShadow), "_mscmp");
          Instruction *CheckTerm =
            SplitBlockAndInsertIfThen(Cmp, &I, false, MS.OriginStoreWeights);
          IRBuilder<> IRBNew(CheckTerm);
          IRBNew.CreateAlignedStore(updateOrigin(getOrigin(Val), IRBNew),
                                    getOriginPtr(Addr, IRBNew), Alignment);
        }
      }
    }
  }

  void materializeChecks() {
    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
      Value *Shadow = InstrumentationList[i].Shadow;
      Instruction *OrigIns = InstrumentationList[i].OrigIns;
      IRBuilder<> IRB(OrigIns);
      DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
      // See the comment in materializeStores().
      if (isa<Constant>(ConvertedShadow))
        continue;
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
          getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        Value *Origin = InstrumentationList[i].Origin;
        IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn);
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  void materializeIndirectCalls() {
    for (size_t i = 0, n = IndirectCallList.size(); i < n; i++) {
      CallSite CS = IndirectCallList[i];
      Instruction *I = CS.getInstruction();
      BasicBlock *B = I->getParent();
      IRBuilder<> IRB(I);
      Value *Fn0 = CS.getCalledValue();
      Value *Fn = IRB.CreateBitCast(Fn0, MS.AnyFunctionPtrTy);

      if (ClWrapIndirectCallsFast) {
        // Check that the call target is inside this module's limits.
        Value *Start =
          IRB.CreateBitCast(MS.MsandrModuleStart, MS.AnyFunctionPtrTy);
        Value *End = IRB.CreateBitCast(MS.MsandrModuleEnd, MS.AnyFunctionPtrTy);

        Value *NotInThisModule = IRB.CreateOr(IRB.CreateICmpULT(Fn, Start),
                                              IRB.CreateICmpUGE(Fn, End));

        PHINode *NewFnPhi =
          IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");

        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            NotInThisModule, NewFnPhi,
            /* Unreachable */ false, MS.ColdCallWeights);

        IRB.SetInsertPoint(CheckTerm);
        // Slow path: call wrapper function to possibly transform the call
        // target.
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());

        NewFnPhi->addIncoming(Fn0, B);
        NewFnPhi->addIncoming(NewFn, dyn_cast<Instruction>(NewFn)->getParent());
        CS.setCalledFunction(NewFnPhi);
      } else {
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());
        CS.setCalledFunction(NewFn);
      }
    }
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.DL) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
         DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
      BasicBlock *BB = *DI;
      visit(*BB);
    }

    // Finalize PHI nodes.
    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
      PHINode *PN = ShadowPHINodes[i];
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores();

    // Insert shadow value checks.
    materializeChecks();

    // Wrap indirect calls.
    materializeIndirectCalls();

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return 0;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the shadow address that corresponds to a given
  /// application address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }
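
  // For example (a sketch of the emitted IR, 64-bit mapping assumed):
  //   %0 = ptrtoint i32* %p to i64
  //   %1 = and i64 %0, -70368744177665   ; ~kShadowMask64 == ~(1 << 46)
  //   %2 = inttoptr i64 %1 to i32*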

  /// \brief Compute the origin address that corresponds to a given
  /// application address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
      IRB.CreateAdd(ShadowLong,
                    ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
      IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return 0;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
           AI != AE; ++AI) {
        if (!AI->getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = AI->hasByValAttr()
          ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
          : MS.DL->getTypeAllocSize(AI->getType());
        if (A == AI) {
          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
          if (AI->hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = AI->getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
                getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG:  " << *AI << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return 0;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
  }

  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }
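
  // For example, addAcquireOrdering(Monotonic) == Acquire: an atomic app
  // load keeps at least acquire semantics, so the shadow load inserted
  // after it cannot be reordered before the app load (a sketch of the
  // reasoning; see the atomic handling discussion at the top of the file).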

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (LoadShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we can not
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
             "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
             "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "And" of 0 and a poisoned value results in unpoisoned value.
    // 1&1 => 1;  0&1 => 0;  p&1 => p;
    // 1&0 => 0;  0&0 => 0;  p&0 => 0;
    // 1&p => p;  0&p => 0;  p&p => p;
    // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
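    //
    // Worked example (a sketch): V1 = 0b0110 fully defined (S1 = 0), V2
    // with bit 2 poisoned (S2 = 0b0100). Then V1S2 = V1 & S2 = 0b0100, so
    // the result shadow is 0b0100: the AND's bit 2 depends on the poisoned
    // bit because V1's bit 2 is 1; every other bit is defined.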
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // "Or" of 1 and a poisoned value results in unpoisoned value.
    // 1|1 => 1;  0|1 => 1;  p|1 => 1;
    // 1|0 => 1;  0|0 => 0;  p|0 => p;
    // 1|p => 1;  0|p => p;  p|p => p;
    // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;
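
  // Typical use (a sketch mirroring handleShadowOr below): OR together the
  // shadows of all operands of I and make the result I's shadow:
  //
  //   IRBuilder<> IRB(&I);
  //   ShadowAndOriginCombiner SC(this, IRB);
  //   for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
  //     SC.Add(OI->get());
  //   SC.Done(&I);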

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
      IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }

  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
    // Result is defined if one of the following is true
    // * there is a defined 1 bit in C
    // * C is fully defined
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                    IRB.CreateICmpEQ(
                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
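
  // Worked example (a sketch, 4-bit values): A = 0b01?? (Sa = 0b0011),
  // B = 0b1000 (Sb = 0). Then C = A ^ B = 0b11??, Sc = 0b0011, and
  // C & ~Sc = 0b1100 != 0: C has a defined 1 bit, so A != B no matter
  // what the unknown bits are, and Si = 0 (the result is fully defined).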

  /// \brief Build the lowest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximize the undefined shadow bit, minimize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }

  /// \brief Build the highest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimize the undefined shadow bit, maximize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }
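
  // For example (unsigned, a sketch): A = 0b1010 with Sa = 0b0110 (bits 1
  // and 2 undefined) gives lowest = A & ~Sa = 0b1000 and highest =
  // A | Sa = 0b1110.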

  /// \brief Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
  /// FIXME: output seems suboptimal when one of the operands is a constant
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits. Let [b0, b1] be the interval of possible
    // values of B. Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
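
  // For example (unsigned, a sketch): if A's possible interval is [8, 14]
  // and B is the constant 4, then both 8 > 4 and 14 > 4 hold, S1 == S2,
  // and (A > B) is fully defined despite A's poisoned bits.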

  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value *op = NULL;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
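
  // For example (a sketch): for %r = shl i8 %a, 3 with a fully defined
  // shift amount (S2 == 0), the result shadow is simply S1 << 3. If any
  // bit of the shift amount is poisoned, S2Conv is all-ones and the
  // entire result is poisoned.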

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice, which is bad in case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// Similar situation exists for memcpy and memset.
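  ///
  /// For example, a call to llvm.memmove(%d, %s, %n, ...) is replaced with
  /// a call to __msan_memmove(%d, %s, %n); the MSan runtime is expected to
  /// move both the data and the corresponding shadow (a sketch of the
  /// contract; see MemmoveFn above).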
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemmoveFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemcpyFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemsetFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }

  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }
1615 /// \brief Handle vector store-like intrinsics.
1617 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1618 /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value* Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
    return true;
  }

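  // E.g. void @llvm.x86.sse2.storeu.dq(i8* %p, <16 x i8> %v) matches this
  // shape (see handleUnknownIntrinsic below): the shadow of %v is stored to
  // the shadow of %p with alignment 1, mirroring the application store.
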
  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (LoadShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }

  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }

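  // E.g. %r = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a, <8 x i16> %b)
  // fits this shape, so its shadow is just the OR of the argument shadows,
  // exactly as for an ordinary arithmetic instruction.
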
  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehavior and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
      F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

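  // bswap only permutes bytes, so the result shadow is simply the bswap of
  // the operand shadow: Sr = bswap(Sa), and the origin passes through.
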
  // \brief Instrument vector convert intrinsic.
  //
  // This function instruments intrinsics like cvtsi2ss:
  // %Out = int_xxx_cvtyyy(%ConvertOp)
  // or
  // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
  // number of \p Out elements, and (if it has 2 arguments) copies the rest of
  // the elements from \p CopyOp.
  // In most cases conversion involves floating-point values which may trigger
  // a hardware exception when not fully initialized. For this reason we
  // require \p ConvertOp[0:NumUsedElements] to be fully initialized and trap
  // otherwise.
  // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
  // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
  // return a fully initialized value.
  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    switch (I.getNumArgOperands()) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = NULL;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    // The first *NumUsedElements* elements of ConvertOp are converted to the
    // same number of output elements. The rest of the output is copied from
    // CopyOp, or (if not available) filled with zeroes.
    // Combine shadow for elements of ConvertOp that are used in this operation,
    // and insert a check.
    // FIXME: consider propagating shadow of ConvertOp, at least in the case of
    // int->any conversion.
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = 0;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    assert(AggShadow->getType()->isIntegerTy());
    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

    // Build result shadow by zero-filling parts of CopyOp shadow that come
    // from ConvertOp.
    if (CopyOp) {
      assert(CopyOp->getType() == I.getType());
      assert(CopyOp->getType()->isVectorTy());
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = ResultShadow->getType()->getVectorElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }
  }

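  // For example, for
  //   %r = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a, i32 %b)
  // the code above checks that %b (ConvertOp) is fully initialized, copies
  // the shadow of elements 1..3 from %a (CopyOp), and marks element 0 of the
  // result shadow as clean.
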
  // Given a scalar or vector, extract lower 64 bits (or less), and return all
  // zeroes if it is zero, and all ones otherwise.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    if (S->getType()->isVectorTy())
      S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
    assert(S->getType()->getPrimitiveSizeInBits() <= 64);
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

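  // In other words: if any of the low (up to) 64 shadow bits of S are set,
  // the result of type T is all ones, otherwise all zeroes. Only the low 64
  // bits matter here because that is all the hardware looks at for a vector
  // shift amount.
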
  Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
    Type *T = S->getType();
    assert(T->isVectorTy());
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return IRB.CreateSExt(S2, T);
  }

  // \brief Instrument vector shift intrinsic.
  //
  // This function instruments intrinsics like int_x86_avx2_psll_w.
  // Intrinsic shifts %In by %ShiftSize bits.
  // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
  // size, and the rest is ignored. Behavior is defined even if shift size is
  // greater than register (or field) width.
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    assert(I.getNumArgOperands() == 2);
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateCall2(I.getCalledValue(),
                                   IRB.CreateBitCast(S1, V1->getType()), V2);
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

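  // E.g. for
  //   %r = call <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16> %a, <8 x i16> %b)
  // this re-issues the same intrinsic on the shadow of %a with the original
  // shift amount %b, then ORs in Lower64ShadowExtend(Sb): a poisoned shift
  // amount poisons every bit of the result.
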
  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvtsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtss2usi64:
    case llvm::Intrinsic::x86_avx512_cvtss2usi:
    case llvm::Intrinsic::x86_avx512_cvttss2usi64:
    case llvm::Intrinsic::x86_avx512_cvttss2usi:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtusi2sd:
    case llvm::Intrinsic::x86_avx512_cvtusi2ss:
    case llvm::Intrinsic::x86_avx512_cvtusi642sd:
    case llvm::Intrinsic::x86_avx512_cvtusi642ss:
    case llvm::Intrinsic::x86_sse2_cvtsd2si64:
    case llvm::Intrinsic::x86_sse2_cvtsd2si:
    case llvm::Intrinsic::x86_sse2_cvtsd2ss:
    case llvm::Intrinsic::x86_sse2_cvtsi2sd:
    case llvm::Intrinsic::x86_sse2_cvtsi642sd:
    case llvm::Intrinsic::x86_sse2_cvtss2sd:
    case llvm::Intrinsic::x86_sse2_cvttsd2si64:
    case llvm::Intrinsic::x86_sse2_cvttsd2si:
    case llvm::Intrinsic::x86_sse_cvtsi2ss:
    case llvm::Intrinsic::x86_sse_cvtsi642ss:
    case llvm::Intrinsic::x86_sse_cvtss2si64:
    case llvm::Intrinsic::x86_sse_cvtss2si:
    case llvm::Intrinsic::x86_sse_cvttss2si64:
    case llvm::Intrinsic::x86_sse_cvttss2si:
      handleVectorConvertIntrinsic(I, 1);
      break;
    case llvm::Intrinsic::x86_sse2_cvtdq2pd:
    case llvm::Intrinsic::x86_sse2_cvtps2pd:
    case llvm::Intrinsic::x86_sse_cvtps2pi:
    case llvm::Intrinsic::x86_sse_cvttps2pi:
      handleVectorConvertIntrinsic(I, 2);
      break;
    case llvm::Intrinsic::x86_avx512_psll_dq:
    case llvm::Intrinsic::x86_avx512_psrl_dq:
    case llvm::Intrinsic::x86_avx2_psll_w:
    case llvm::Intrinsic::x86_avx2_psll_d:
    case llvm::Intrinsic::x86_avx2_psll_q:
    case llvm::Intrinsic::x86_avx2_pslli_w:
    case llvm::Intrinsic::x86_avx2_pslli_d:
    case llvm::Intrinsic::x86_avx2_pslli_q:
    case llvm::Intrinsic::x86_avx2_psll_dq:
    case llvm::Intrinsic::x86_avx2_psrl_w:
    case llvm::Intrinsic::x86_avx2_psrl_d:
    case llvm::Intrinsic::x86_avx2_psrl_q:
    case llvm::Intrinsic::x86_avx2_psra_w:
    case llvm::Intrinsic::x86_avx2_psra_d:
    case llvm::Intrinsic::x86_avx2_psrli_w:
    case llvm::Intrinsic::x86_avx2_psrli_d:
    case llvm::Intrinsic::x86_avx2_psrli_q:
    case llvm::Intrinsic::x86_avx2_psrai_w:
    case llvm::Intrinsic::x86_avx2_psrai_d:
    case llvm::Intrinsic::x86_avx2_psrl_dq:
    case llvm::Intrinsic::x86_sse2_psll_w:
    case llvm::Intrinsic::x86_sse2_psll_d:
    case llvm::Intrinsic::x86_sse2_psll_q:
    case llvm::Intrinsic::x86_sse2_pslli_w:
    case llvm::Intrinsic::x86_sse2_pslli_d:
    case llvm::Intrinsic::x86_sse2_pslli_q:
    case llvm::Intrinsic::x86_sse2_psll_dq:
    case llvm::Intrinsic::x86_sse2_psrl_w:
    case llvm::Intrinsic::x86_sse2_psrl_d:
    case llvm::Intrinsic::x86_sse2_psrl_q:
    case llvm::Intrinsic::x86_sse2_psra_w:
    case llvm::Intrinsic::x86_sse2_psra_d:
    case llvm::Intrinsic::x86_sse2_psrli_w:
    case llvm::Intrinsic::x86_sse2_psrli_d:
    case llvm::Intrinsic::x86_sse2_psrli_q:
    case llvm::Intrinsic::x86_sse2_psrai_w:
    case llvm::Intrinsic::x86_sse2_psrai_d:
    case llvm::Intrinsic::x86_sse2_psrl_dq:
    case llvm::Intrinsic::x86_mmx_psll_w:
    case llvm::Intrinsic::x86_mmx_psll_d:
    case llvm::Intrinsic::x86_mmx_psll_q:
    case llvm::Intrinsic::x86_mmx_pslli_w:
    case llvm::Intrinsic::x86_mmx_pslli_d:
    case llvm::Intrinsic::x86_mmx_pslli_q:
    case llvm::Intrinsic::x86_mmx_psrl_w:
    case llvm::Intrinsic::x86_mmx_psrl_d:
    case llvm::Intrinsic::x86_mmx_psrl_q:
    case llvm::Intrinsic::x86_mmx_psra_w:
    case llvm::Intrinsic::x86_mmx_psra_d:
    case llvm::Intrinsic::x86_mmx_psrli_w:
    case llvm::Intrinsic::x86_mmx_psrli_d:
    case llvm::Intrinsic::x86_mmx_psrli_q:
    case llvm::Intrinsic::x86_mmx_psrai_w:
    case llvm::Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /* Variable */ false);
      break;
    case llvm::Intrinsic::x86_avx2_psllv_d:
    case llvm::Intrinsic::x86_avx2_psllv_d_256:
    case llvm::Intrinsic::x86_avx2_psllv_q:
    case llvm::Intrinsic::x86_avx2_psllv_q_256:
    case llvm::Intrinsic::x86_avx2_psrlv_d:
    case llvm::Intrinsic::x86_avx2_psrlv_d_256:
    case llvm::Intrinsic::x86_avx2_psrlv_q:
    case llvm::Intrinsic::x86_avx2_psrlv_q_256:
    case llvm::Intrinsic::x86_avx2_psrav_d:
    case llvm::Intrinsic::x86_avx2_psrav_d_256:
      handleVectorShiftIntrinsic(I, /* Variable */ true);
      break;

    // Byte shifts are not implemented.
    // case llvm::Intrinsic::x86_avx512_psll_dq_bs:
    // case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
    // case llvm::Intrinsic::x86_avx2_psll_dq_bs:
    // case llvm::Intrinsic::x86_avx2_psrl_dq_bs:
    // case llvm::Intrinsic::x86_sse2_psll_dq_bs:
    // case llvm::Intrinsic::x86_sse2_psrl_dq_bs:

    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }

  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark all
      // outputs as clean. Note that any side effects of the inline asm that
      // are not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      // Allow only tail calls with the same types, otherwise
      // we may have a false positive: shadow for a non-void RetVal
      // will get propagated to a void RetVal.
      if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
        Call->setTailCall(false);

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us. To
      // prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }
    IRBuilder<> IRB(&I);

    if (MS.WrapIndirectCalls && !CS.getCalledFunction())
      IndirectCallList.push_back(CS);

    unsigned ArgOffset = 0;
    DEBUG(dbgs() << " CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.DL->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));

      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << " Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << " done with call args\n");

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal) return;
    Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
    if (CheckReturnValue) {
      insertShadowCheck(RetVal, &I);
      Value *Shadow = getCleanShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
    } else {
      Value *Shadow = getShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      // FIXME: make it conditional if ClStoreCleanOrigin==0
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
    }

    if (PoisonStack && MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and will be
      // replaced with the origin id by the run-time at the first call.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
          createPrivateNonConstGlobalForString(*F.getParent(),
                                               StackDescription.str());

      IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
                      IRB.CreatePointerCast(&F, MS.IntptrTy));
    }
  }

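  // E.g. for "int x;" in function "f" the descriptor string is "----x@f";
  // the run-time overwrites the leading '----' with the numeric origin id
  // and prints the rest when reporting a stack-originated use of an
  // uninitialized value.
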
  void visitSelectInst(SelectInst& I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *S = IRB.CreateSelect(I.getCondition(), getShadow(I.getTrueValue()),
                                getShadow(I.getFalseValue()));
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
      // an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      S = IRB.CreateSelect(getShadow(I.getCondition()),
                           getPoisonedShadow(getShadowTy(I.getType())), S,
                           "_msprop_select_agg");
    } else {
      // Sa = (sext Sb) | (select b, Sc, Sd)
      S = IRB.CreateOr(S, CreateShadowCast(IRB, getShadow(I.getCondition()),
                                           S->getType(), true),
                       "_msprop_select");
    }
    setShadow(&I, S);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      Value *Cond = I.getCondition();
      Value *CondShadow = getShadow(Cond);
      if (Cond->getType()->isVectorTy()) {
        Type *FlatTy = getShadowTyNoVec(Cond->getType());
        Cond = IRB.CreateICmpNE(IRB.CreateBitCast(Cond, FlatTy),
                                ConstantInt::getNullValue(FlatTy));
        CondShadow = IRB.CreateICmpNE(IRB.CreateBitCast(CondShadow, FlatTy),
                                      ConstantInt::getNullValue(FlatTy));
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(
          CondShadow, getOrigin(I.getCondition()),
          IRB.CreateSelect(Cond, getOrigin(I.getTrueValue()),
                           getOrigin(I.getFalseValue()))));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "   ResShadow: " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    DEBUG(dbgs() << "   InsShadow: " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "   Res: " << *Res << "\n");
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertShadowCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value* arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }

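  // E.g. i32 and i8* arguments classify as AK_GeneralPurpose, double and
  // <4 x float> as AK_FloatingPoint, while i128 or a first-class aggregate
  // falls through to AK_Memory (the overflow area).
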
  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned ArgNo = CS.getArgumentNo(ArgIt);
      bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area.
        assert(A->getType()->isPointerTy());
        Type *RealTy = A->getType()->getPointerElementType();
        uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
        Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
        IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
                         ArgSize, kShadowTLSAlignment);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *Base;
        switch (AK) {
          case AK_GeneralPurpose:
            Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
            GpOffset += 8;
            break;
          case AK_FloatingPoint:
            Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
            FpOffset += 16;
            break;
          case AK_Memory:
            uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
            Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
            OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
        }
        IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
      }
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

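  // E.g. for printf("%d %g", i, d) the format pointer's shadow lands in the
  // GP region at offset 0, the shadow of i at offset 8, and the shadow of d
  // in the FP region at offset AMD64GpEndOffset (48); OverflowSize is 0
  // because nothing spilled past the 176-byte register-save area.
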
  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

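  // The 24 bytes above cover the whole x86_64 __va_list_tag:
  //   i32 gp_offset, i32 fp_offset, i8* overflow_arg_area, i8* reg_save_area
  // which is also why finalizeInstrumentation below reads the pointers at
  // byte offsets 8 (overflow_arg_area) and 16 (reg_save_area) of the tag.
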
  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
    }
  }
};

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64. False positives are possible
  // on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

}  // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}