//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
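///
/// For example (an illustrative sketch, not the literal emitted IR), an
/// addition "c = a + b" is instrumented as
///
///     c = a + b;                          // original operation
///     shadow(c) = shadow(a) | shadow(b);  // approximate shadow propagation
///
/// while a conditional branch on "c" checks shadow(c) and calls
/// __msan_warning* if it is non-zero.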
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But it also brings a major
/// issue: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with slow-path
/// storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
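///
/// For example (illustrative), an application byte whose shadow byte is 0xff
/// is fully uninitialized, one with shadow 0x0f has its low four bits
/// uninitialized, and one with shadow 0x00 is fully initialized.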
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
///                           Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needless overwriting of the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
///
///                            Atomic handling.
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations cannot be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
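///
/// For example (an illustrative sketch), an atomic store is instrumented
/// roughly as
///
///     store clean shadow to shadow(addr);  // shadow store goes first
///     atomic store to addr;                // release-or-stronger ordering
///
/// and an atomic load as
///
///     atomic load from addr;               // acquire-or-stronger ordering
///     load shadow from shadow(addr);       // shadow load goes last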
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. Current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
///
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "msan"
static const unsigned kOriginSize = 4;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

// These constants must be kept in sync with the ones in msan.h.
static const unsigned kParamTLSSize = 800;
static const unsigned kRetvalTLSSize = 800;

// Access sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;
/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but such accesses still
// happen (e.g. only the lower bits of the address are garbage, or the
// access happens early at program startup where malloc-ed memory is
// more likely to be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

// This is an experiment to enable handling of cases where shadow is a non-zero
// compile-time constant. For some unexplained reason they were silently
// ignored in the instrumentation.
static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
       cl::desc("Insert checks for constant shadow values"),
       cl::Hidden, cl::init(false));
namespace {

// Memory map parameters used in application-to-shadow address calculation.
// Offset = (Addr & ~AndMask) ^ XorMask
// Shadow = ShadowBase + Offset
// Origin = OriginBase + Offset
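// For example, with the Linux/x86_64 parameters defined below, this gives
// (illustrative numbers):
//   Addr   = 0x7f0123456789
//   Offset = 0x7f0123456789 & ~0x400000000000 = 0x3f0123456789
//   Shadow = 0x3f0123456789 (ShadowBase and XorMask are 0 on this target)
//   Origin = 0x200000000000 + 0x3f0123456789 = 0x5f0123456789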
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
  0x000080000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x000040000000,  // OriginBase
};

// x86_64 Linux
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
  0x400000000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x200000000000,  // OriginBase
};

// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
  0x004000000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x002000000000,  // OriginBase
};

// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
  0x000180000000,  // AndMask
  0x000040000000,  // XorMask
  0x000020000000,  // ShadowBase
  0x000700000000,  // OriginBase
};

// x86_64 FreeBSD
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
  0xc00000000000,  // AndMask
  0x200000000000,  // XorMask
  0x100000000000,  // ShadowBase
  0x380000000000,  // OriginBase
};

static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
  &Linux_I386_MemoryMapParams,
  &Linux_X86_64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
  nullptr,
  &Linux_MIPS64_MemoryMapParams,
};

static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
  &FreeBSD_I386_MemoryMapParams,
  &FreeBSD_X86_64_MemoryMapParams,
};
/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0)
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        DL(nullptr),
        WarningFn(nullptr) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  const DataLayout *DL;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;

  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Memory map parameters used in application-to-shadow calculation.
  const MemoryMapParams *MapParams;

  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
}  // namespace
char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
  return new MemorySanitizer(TrackOrigins);
}
/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}
/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr);

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty(), nullptr);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr);
  }

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
    "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MsanPoisonStackFn =
    M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
                          IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MsanChainOriginFn = M.getOrInsertFunction(
    "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, nullptr);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, nullptr);

  RetvalTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
      M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
      M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
      nullptr, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
      M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
      GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
      M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_va_arg_overflow_size_tls", nullptr,
      GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
      M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
      "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}
/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  Triple TargetTriple(M.getTargetTriple());
  switch (TargetTriple.getOS()) {
    case Triple::FreeBSD:
      switch (TargetTriple.getArch()) {
        case Triple::x86_64:
          MapParams = FreeBSD_X86_MemoryMapParams.bits64;
          break;
        case Triple::x86:
          MapParams = FreeBSD_X86_MemoryMapParams.bits32;
          break;
        default:
          report_fatal_error("unsupported architecture");
      }
      break;
    case Triple::Linux:
      switch (TargetTriple.getArch()) {
        case Triple::x86_64:
          MapParams = Linux_X86_MemoryMapParams.bits64;
          break;
        case Triple::x86:
          MapParams = Linux_X86_MemoryMapParams.bits32;
          break;
        case Triple::mips64:
        case Triple::mips64el:
          MapParams = Linux_MIPS_MemoryMapParams.bits64;
          break;
        default:
          report_fatal_error("unsupported architecture");
      }
      break;
    default:
      report_fatal_error("unsupported operating system");
  }

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), nullptr)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

namespace {
/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};
struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil(TypeSize / 8);
}
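// (Illustrative: this maps shadow sizes of 1..8 bits to index 0, 16 bits to
// 1, 32 bits to 2 and 64 bits to 3, selecting among the __msan_maybe_*
// callbacks declared in initializeCallbacks().)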
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool PropagateShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
          dbgs() << "MemorySanitizer is not inserting checks into '"
                 << F.getName() << "'\n");
  }
  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
    unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy);
    if (IntptrSize == kOriginSize) return Origin;
    assert(IntptrSize == kOriginSize * 2);
    Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
    return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
  }
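  // (Illustrative: on a 64-bit target, originToIntptr turns origin id
  // 0x12345678 into 0x1234567812345678, so a single intptr-sized store in
  // paintOrigin() below fills two adjacent 4-byte origin slots.)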
  /// \brief Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   unsigned Size, unsigned Alignment) {
    unsigned IntptrAlignment = MS.DL->getABITypeAlignment(MS.IntptrTy);
    unsigned IntptrSize = MS.DL->getTypeStoreSize(MS.IntptrTy);
    assert(IntptrAlignment >= kMinOriginAlignment);
    assert(IntptrSize >= kOriginSize);

    unsigned Ofs = 0;
    unsigned CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      Value *IntptrOriginPtr =
          IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        Value *Ptr =
            i ? IRB.CreateConstGEP1_32(IntptrOriginPtr, i) : IntptrOriginPtr;
        IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
        Ofs += IntptrSize / kOriginSize;
        CurrentAlignment = IntptrAlignment;
      }
    }

    for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
      Value *GEP = i ? IRB.CreateConstGEP1_32(OriginPtr, i) : OriginPtr;
      IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
      CurrentAlignment = kMinOriginAlignment;
    }
  }
  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
    unsigned StoreSize = MS.DL->getTypeStoreSize(Shadow->getType());
    if (isa<StructType>(Shadow->getType())) {
      paintOrigin(IRB, updateOrigin(Origin, IRB),
                  getOriginPtr(Addr, IRB, Alignment), StoreSize,
                  OriginAlignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
      if (ConstantShadow) {
        if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
          paintOrigin(IRB, updateOrigin(Origin, IRB),
                      getOriginPtr(Addr, IRB, Alignment), StoreSize,
                      OriginAlignment);
        return;
      }

      unsigned TypeSizeInBits =
          MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
        Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall3(Fn, ConvertedShadow2,
                        IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                        Origin);
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
                    getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
                    OriginAlignment);
      }
    }
  }
  void materializeStores(bool InstrumentWithCalls) {
    for (auto Inst : StoreList) {
      StoreInst &SI = *dyn_cast<StoreInst>(Inst);

      IRBuilder<> IRB(&SI);
      Value *Val = SI.getValueOperand();
      Value *Addr = SI.getPointerOperand();
      Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);

      if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

      if (MS.TrackOrigins && !SI.isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
                    InstrumentWithCalls);
    }
  }
  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");

    Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
    if (ConstantShadow) {
      if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
        if (MS.TrackOrigins) {
          IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                          MS.OriginTLS);
        }
        IRB.CreateCall(MS.WarningFn);
        IRB.CreateCall(MS.EmptyAsm);
        // FIXME: Insert UnreachableInst if !ClKeepGoing?
        // This may invalidate some of the following checks and needs to be
        // done at the very end.
      }
      return;
    }

    unsigned TypeSizeInBits =
        MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0));
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn);
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }
  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }
  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.DL) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    return true;
  }
  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return nullptr;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }
  /// \brief Compute the integer shadow offset that corresponds to a given
  /// application address.
  ///
  /// Offset = (Addr & ~AndMask) ^ XorMask
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    uint64_t AndMask = MS.MapParams->AndMask;
    assert(AndMask != 0 && "AndMask shall be specified");
    Value *OffsetLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~AndMask));

    uint64_t XorMask = MS.MapParams->XorMask;
    if (XorMask != 0)
      OffsetLong = IRB.CreateXor(OffsetLong,
                                 ConstantInt::get(MS.IntptrTy, XorMask));
    return OffsetLong;
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = ShadowBase + Offset
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong = getShadowPtrOffset(Addr, IRB);
    uint64_t ShadowBase = MS.MapParams->ShadowBase;
    if (ShadowBase != 0)
      ShadowLong =
        IRB.CreateAdd(ShadowLong,
                      ConstantInt::get(MS.IntptrTy, ShadowBase));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (OriginBase + Offset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
    Value *OriginLong = getShadowPtrOffset(Addr, IRB);
    uint64_t OriginBase = MS.MapParams->OriginBase;
    if (OriginBase != 0)
      OriginLong =
        IRB.CreateAdd(OriginLong,
                      ConstantInt::get(MS.IntptrTy, OriginBase));
    if (Alignment < kMinOriginAlignment) {
      uint64_t Mask = kMinOriginAlignment - 1;
      OriginLong = IRB.CreateAnd(OriginLong,
                                 ConstantInt::get(MS.IntptrTy, ~Mask));
    }
    return IRB.CreateIntToPtr(OriginLong,
                              PointerType::get(IRB.getInt32Ty(), 0));
  }
  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }
  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      return ConstantArray::get(AT, Vals);
    }
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
      return ConstantStruct::get(ST, Vals);
    }
    llvm_unreachable("Unexpected shadow type");
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }
  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (!PropagateShadow) return getCleanShadow(V);
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = FArg.hasByValAttr()
            ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
            : MS.DL->getTypeAllocSize(FArg.getType());
        if (A == &FArg) {
          bool Overflow = ArgOffset + Size > kParamTLSSize;
          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
            }
            if (Overflow) {
              // ParamTLS overflow.
              EntryIRB.CreateMemSet(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
                  Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign);
            } else {
              unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
              Value *Cpy = EntryIRB.CreateMemCpy(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                  CopyAlign);
              DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
              (void)Cpy;
            }
            *ShadowPtr = getCleanShadow(V);
          } else {
            if (Overflow) {
              // ParamTLS overflow.
              *ShadowPtr = getCleanShadow(V);
            } else {
              *ShadowPtr =
                  EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
            }
          }
          DEBUG(dbgs() << "  ARG: " << FArg << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins && !Overflow) {
            Value *OriginPtr =
                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          } else {
            setOrigin(A, getCleanOrigin());
          }
        }
        ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }
  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return nullptr;
    if (!PropagateShadow) return getCleanOrigin();
    if (isa<Constant>(V)) return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");
    return Origin;
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Value *Shadow, *Origin;
    if (ClCheckConstantShadow) {
      Shadow = getShadow(Val);
      if (!Shadow) return;
      Origin = getOrigin(Val);
    } else {
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      if (!Shadow) return;
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    }
    insertShadowCheck(Shadow, Origin, OrigIns);
  }
  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }
  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (PropagateShadow && !I.getMetadata("nosanitize")) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        unsigned Alignment = I.getAlignment();
        unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
        setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
                                            OriginAlignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }
  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we can not
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in 0 origin value.
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
            Value *Cond =
                IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
            Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
          }
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;
  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }
  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
        IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }
  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  // \brief Handle multiplication by constant.
  //
  // Handle a special case of multiplication by constant that may have one or
  // more zeros in the lower bits. This makes corresponding number of lower
  // bits of the result zero as well. We model it by shifting the other
  // operand shadow left by the required number of bits. Effectively, we
  // transform (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as
  // (Sx << B). We use multiplication by 2**N instead of shift to cover the
  // case of multiplication by 0, which may occur in some elements of a
  // vector operand.
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {
    Constant *ShadowMul;
    Type *Ty = ConstArg->getType();
    if (Ty->isVectorTy()) {
      unsigned NumElements = Ty->getVectorNumElements();
      Type *EltTy = Ty->getSequentialElementType();
      SmallVector<Constant *, 16> Elements;
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        ConstantInt *Elt =
            dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx));
        APInt V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
        Elements.push_back(ConstantInt::get(EltTy, V2));
      }
      ShadowMul = ConstantVector::get(Elements);
    } else {
      ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg);
      APInt V = Elt->getValue();
      APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
      ShadowMul = ConstantInt::get(Elt->getType(), V2);
    }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }
  void visitMul(BinaryOperator &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
    else
      handleShadowOr(I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }
1599 /// \brief Instrument == and != comparisons.
1601 /// Sometimes the comparison result is known even if some of the bits of the
1602 /// arguments are not.
1603 void handleEqualityComparison(ICmpInst &I) {
1604 IRBuilder<> IRB(&I);
1605 Value *A = I.getOperand(0);
1606 Value *B = I.getOperand(1);
1607 Value *Sa = getShadow(A);
1608 Value *Sb = getShadow(B);
1610 // Get rid of pointers and vectors of pointers.
1611 // For ints (and vectors of ints), types of A and Sa match,
1612 // and this is a no-op.
1613 A = IRB.CreatePointerCast(A, Sa->getType());
1614 B = IRB.CreatePointerCast(B, Sb->getType());
1616 // A == B <==> (C = A^B) == 0
1617 // A != B <==> (C = A^B) != 0
1619 Value *C = IRB.CreateXor(A, B);
1620 Value *Sc = IRB.CreateOr(Sa, Sb);
1621 // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
1622 // Result is defined if one of the following is true
1623 // * there is a defined 1 bit in C
1624 // * C is fully defined
1625 // Si = !(C & ~Sc) && Sc
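// For example (illustrative): comparing A == B with
//   A = 0b0100 (bit 0 poisoned, Sa = 0b0001) and B = 0b0000 (fully defined)
// gives C = 0b0100 and Sc = 0b0001. Bit 2 of C is 1 and defined
// (C & ~Sc = 0b0100 != 0), so the result is known to be "not equal"
// no matter what the poisoned bit holds, and Si = 0 (clean).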
1626 Value *Zero = Constant::getNullValue(Sc->getType());
1627 Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
1628 Value *Si =
1629 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
1630 IRB.CreateICmpEQ(
1631 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
1632 Si->setName("_msprop_icmp");
1633 setShadow(&I, Si);
1634 setOriginForNaryOp(I);
1635 }
1637 /// \brief Build the lowest possible value of V, taking into account V's
1638 /// uninitialized bits.
1639 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1640 bool isSigned) {
1641 if (isSigned) {
1642 // Split shadow into sign bit and other bits.
1643 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1644 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1645 // Maximize the undefined sign bit, minimize other undefined bits.
1646 return
1647 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
1648 } else {
1649 // Minimize undefined bits.
1650 return IRB.CreateAnd(A, IRB.CreateNot(Sa));
1651 }
1652 }
1654 /// \brief Build the highest possible value of V, taking into account V's
1655 /// uninitialized bits.
1656 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1657 bool isSigned) {
1658 if (isSigned) {
1659 // Split shadow into sign bit and other bits.
1660 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1661 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1662 // Minimize the undefined sign bit, maximize other undefined bits.
1663 return
1664 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
1665 } else {
1666 // Maximize undefined bits.
1667 return IRB.CreateOr(A, Sa);
1668 }
1669 }
1671 /// \brief Instrument relational comparisons.
1673 /// This function does exact shadow propagation for all relational
1674 /// comparisons of integers, pointers and vectors of those.
1675 /// FIXME: output seems suboptimal when one of the operands is a constant
1676 void handleRelationalComparisonExact(ICmpInst &I) {
1677 IRBuilder<> IRB(&I);
1678 Value *A = I.getOperand(0);
1679 Value *B = I.getOperand(1);
1680 Value *Sa = getShadow(A);
1681 Value *Sb = getShadow(B);
1683 // Get rid of pointers and vectors of pointers.
1684 // For ints (and vectors of ints), types of A and Sa match,
1685 // and this is a no-op.
1686 A = IRB.CreatePointerCast(A, Sa->getType());
1687 B = IRB.CreatePointerCast(B, Sb->getType());
1689 // Let [a0, a1] be the interval of possible values of A, taking into account
1690 // its undefined bits. Let [b0, b1] be the interval of possible values of B.
1691 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
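// For example (illustrative): with unsigned A = 0b01??, i.e. defined bits
// 0b0100 and Sa = 0b0011, the interval is [a0, a1] = [4, 7]. For a constant
// B = 8, both 4 < 8 and 7 < 8 hold, so (A < B) is defined (true); for B = 6
// the two endpoint comparisons disagree, so the result is poisoned.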
1692 bool IsSigned = I.isSigned();
1693 Value *S1 = IRB.CreateICmp(I.getPredicate(),
1694 getLowestPossibleValue(IRB, A, Sa, IsSigned),
1695 getHighestPossibleValue(IRB, B, Sb, IsSigned));
1696 Value *S2 = IRB.CreateICmp(I.getPredicate(),
1697 getHighestPossibleValue(IRB, A, Sa, IsSigned),
1698 getLowestPossibleValue(IRB, B, Sb, IsSigned));
1699 Value *Si = IRB.CreateXor(S1, S2);
1700 setShadow(&I, Si);
1701 setOriginForNaryOp(I);
1702 }
1704 /// \brief Instrument signed relational comparisons.
1706 /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
1707 /// propagating the highest bit of the shadow. Everything else is delegated
1708 /// to handleShadowOr().
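/// For example (illustrative): (x < 0) for an i32 x depends only on the
/// sign bit, so the result shadow is just bit 31 of Sx, computed below as
///   icmp slt i32 %sx, 0
/// which is true exactly when the sign bit of the shadow is set.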
1709 void handleSignedRelationalComparison(ICmpInst &I) {
1710 Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
1711 Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
1712 Value* op = nullptr;
1713 CmpInst::Predicate pre = I.getPredicate();
1714 if (constOp0 && constOp0->isNullValue() &&
1715 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
1716 op = I.getOperand(1);
1717 } else if (constOp1 && constOp1->isNullValue() &&
1718 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
1719 op = I.getOperand(0);
1720 }
1721 if (op) {
1722 IRBuilder<> IRB(&I);
1723 Value* Shadow =
1724 IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
1725 setShadow(&I, Shadow);
1726 setOrigin(&I, getOrigin(op));
1727 } else {
1728 handleShadowOr(I);
1729 }
1730 }
1732 void visitICmpInst(ICmpInst &I) {
1733 if (!ClHandleICmp) {
1734 handleShadowOr(I);
1735 return;
1736 }
1737 if (I.isEquality()) {
1738 handleEqualityComparison(I);
1739 return;
1740 }
1742 assert(I.isRelational());
1743 if (ClHandleICmpExact) {
1744 handleRelationalComparisonExact(I);
1745 return;
1746 }
1747 if (I.isSigned()) {
1748 handleSignedRelationalComparison(I);
1749 return;
1750 }
1752 assert(I.isUnsigned());
1753 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
1754 handleRelationalComparisonExact(I);
1755 return;
1756 }
1758 handleShadowOr(I);
1759 }
1761 void visitFCmpInst(FCmpInst &I) {
1762 handleShadowOr(I);
1763 }
1765 void handleShift(BinaryOperator &I) {
1766 IRBuilder<> IRB(&I);
1767 // If any of the S2 bits are poisoned, the whole thing is poisoned.
1768 // Otherwise perform the same shift on S1.
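// For example (illustrative):
//   %r = shl i32 %x, %n
// gets the shadow (shl Sx, %n) | sext(Sn != 0): a fully defined %n shifts
// the shadow of %x by the same amount, while any poisoned bit in %n
// poisons the entire result.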
1769 Value *S1 = getShadow(&I, 0);
1770 Value *S2 = getShadow(&I, 1);
1771 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
1772 S2->getType());
1773 Value *V2 = I.getOperand(1);
1774 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
1775 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1776 setOriginForNaryOp(I);
1777 }
1779 void visitShl(BinaryOperator &I) { handleShift(I); }
1780 void visitAShr(BinaryOperator &I) { handleShift(I); }
1781 void visitLShr(BinaryOperator &I) { handleShift(I); }
1783 /// \brief Instrument llvm.memmove
1785 /// At this point we don't know if llvm.memmove will be inlined or not.
1786 /// If we don't instrument it and it gets inlined,
1787 /// our interceptor will not kick in and we will lose the memmove.
1788 /// If we instrument the call here, but it does not get inlined,
1789 /// we will memmove the shadow twice, which is bad in the case
1790 /// of overlapping regions. So, we simply lower the intrinsic to a call.
1792 /// Similar situation exists for memcpy and memset.
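/// For example (illustrative), a call to
///   llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, ...)
/// is replaced with a plain call to the runtime function behind
/// MS.MemmoveFn (__msan_memmove), which moves the application bytes and
/// their shadow exactly once, whether or not the original intrinsic would
/// have been inlined.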
1793 void visitMemMoveInst(MemMoveInst &I) {
1794 IRBuilder<> IRB(&I);
1795 IRB.CreateCall3(
1796 MS.MemmoveFn,
1797 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1798 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1799 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1800 I.eraseFromParent();
1801 }
1803 // Similar to memmove: avoid copying shadow twice.
1804 // This is somewhat unfortunate as it may slow down small constant memcpys.
1805 // FIXME: consider doing manual inline for small constant sizes and proper
1806 // alignment.
1807 void visitMemCpyInst(MemCpyInst &I) {
1808 IRBuilder<> IRB(&I);
1809 IRB.CreateCall3(
1810 MS.MemcpyFn,
1811 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1812 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1813 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1814 I.eraseFromParent();
1815 }
1817 // Same as memcpy.
1818 void visitMemSetInst(MemSetInst &I) {
1819 IRBuilder<> IRB(&I);
1820 IRB.CreateCall3(
1821 MS.MemsetFn,
1822 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1823 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1824 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1825 I.eraseFromParent();
1826 }
1828 void visitVAStartInst(VAStartInst &I) {
1829 VAHelper->visitVAStartInst(I);
1830 }
1832 void visitVACopyInst(VACopyInst &I) {
1833 VAHelper->visitVACopyInst(I);
1834 }
1836 enum IntrinsicKind {
1837 IK_DoesNotAccessMemory,
1838 IK_OnlyReadsMemory,
1839 IK_WritesMemory
1840 };
1842 static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
1843 const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
1844 const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
1845 const int OnlyReadsMemory = IK_OnlyReadsMemory;
1846 const int OnlyAccessesArgumentPointees = IK_WritesMemory;
1847 const int UnknownModRefBehavior = IK_WritesMemory;
1848 #define GET_INTRINSIC_MODREF_BEHAVIOR
1849 #define ModRefBehavior IntrinsicKind
1850 #include "llvm/IR/Intrinsics.gen"
1851 #undef ModRefBehavior
1852 #undef GET_INTRINSIC_MODREF_BEHAVIOR
1853 }
1855 /// \brief Handle vector store-like intrinsics.
1857 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1858 /// has 1 pointer argument and 1 vector argument, returns void.
1859 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1860 IRBuilder<> IRB(&I);
1861 Value* Addr = I.getArgOperand(0);
1862 Value *Shadow = getShadow(&I, 1);
1863 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1865 // We don't know the pointer alignment (could be unaligned SSE store!).
1866 // Have to assume the worst case.
1867 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1869 if (ClCheckAccessAddress)
1870 insertShadowCheck(Addr, &I);
1872 // FIXME: use ClStoreCleanOrigin
1873 // FIXME: factor out common code from materializeStores
1874 if (MS.TrackOrigins)
1875 IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
1877 return true;
1878 }
1879 /// \brief Handle vector load-like intrinsics.
1881 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
1882 /// has 1 pointer argument, returns a vector.
1883 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
1884 IRBuilder<> IRB(&I);
1885 Value *Addr = I.getArgOperand(0);
1887 Type *ShadowTy = getShadowTy(&I);
1888 if (PropagateShadow) {
1889 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1890 // We don't know the pointer alignment (could be unaligned SSE load!).
1891 // Have to assume the worst case.
1892 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
1893 } else {
1894 setShadow(&I, getCleanShadow(&I));
1895 }
1897 if (ClCheckAccessAddress)
1898 insertShadowCheck(Addr, &I);
1900 if (MS.TrackOrigins) {
1901 if (PropagateShadow)
1902 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
1903 else
1904 setOrigin(&I, getCleanOrigin());
1905 }
1907 return true;
1908 }
1909 /// \brief Handle (SIMD arithmetic)-like intrinsics.
1911 /// Instrument intrinsics with any number of arguments of the same type,
1912 /// equal to the return type. The type should be simple (no aggregates or
1913 /// pointers; vectors are fine).
1914 /// Caller guarantees that this intrinsic does not access memory.
1915 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
1916 Type *RetTy = I.getType();
1917 if (!(RetTy->isIntOrIntVectorTy() ||
1918 RetTy->isFPOrFPVectorTy() ||
1919 RetTy->isX86_MMXTy()))
1920 return false;
1922 unsigned NumArgOperands = I.getNumArgOperands();
1924 for (unsigned i = 0; i < NumArgOperands; ++i) {
1925 Type *Ty = I.getArgOperand(i)->getType();
1926 if (Ty != RetTy)
1927 return false;
1928 }
1930 IRBuilder<> IRB(&I);
1931 ShadowAndOriginCombiner SC(this, IRB);
1932 for (unsigned i = 0; i < NumArgOperands; ++i)
1933 SC.Add(I.getArgOperand(i));
1934 SC.Done(&I);
1936 return true;
1937 }
1939 /// \brief Heuristically instrument unknown intrinsics.
1941 /// The main purpose of this code is to do something reasonable with all
1942 /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
1943 /// We recognize several classes of intrinsics by their argument types and
1944 /// ModRefBehavior and apply special instrumentation when we are reasonably
1945 /// sure that we know what the intrinsic does.
1947 /// We special-case intrinsics where this approach fails. See llvm.bswap
1948 /// handling as an example of that.
1949 bool handleUnknownIntrinsic(IntrinsicInst &I) {
1950 unsigned NumArgOperands = I.getNumArgOperands();
1951 if (NumArgOperands == 0)
1952 return false;
1954 Intrinsic::ID iid = I.getIntrinsicID();
1955 IntrinsicKind IK = getIntrinsicKind(iid);
1956 bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
1957 bool WritesMemory = IK == IK_WritesMemory;
1958 assert(!(OnlyReadsMemory && WritesMemory));
1960 if (NumArgOperands == 2 &&
1961 I.getArgOperand(0)->getType()->isPointerTy() &&
1962 I.getArgOperand(1)->getType()->isVectorTy() &&
1963 I.getType()->isVoidTy() &&
1964 WritesMemory) {
1965 // This looks like a vector store.
1966 return handleVectorStoreIntrinsic(I);
1967 }
1969 if (NumArgOperands == 1 &&
1970 I.getArgOperand(0)->getType()->isPointerTy() &&
1971 I.getType()->isVectorTy() &&
1972 OnlyReadsMemory) {
1973 // This looks like a vector load.
1974 return handleVectorLoadIntrinsic(I);
1975 }
1977 if (!OnlyReadsMemory && !WritesMemory)
1978 if (maybeHandleSimpleNomemIntrinsic(I))
1979 return true;
1981 // FIXME: detect and handle SSE maskstore/maskload
1982 return false;
1983 }
1985 void handleBswap(IntrinsicInst &I) {
1986 IRBuilder<> IRB(&I);
1987 Value *Op = I.getArgOperand(0);
1988 Type *OpType = Op->getType();
1989 Function *BswapFunc = Intrinsic::getDeclaration(
1990 F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
1991 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
1992 setOrigin(&I, getOrigin(Op));
1993 }
1995 // \brief Instrument vector convert intrinsic.
1997 // This function instruments intrinsics like cvtsi2ss:
1998 // %Out = int_xxx_cvtyyy(%ConvertOp)
1999 // or
2000 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
2001 // The intrinsic converts \p NumUsedElements elements of \p ConvertOp to the
2002 // same number of \p Out elements, and (if it has 2 arguments) copies the
2003 // rest of the elements from \p CopyOp.
2004 // In most cases the conversion involves a floating-point value, which may
2005 // trigger a hardware exception when not fully initialized. For this reason
2006 // we require \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2007 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2008 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2009 // return a fully initialized value.
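// For example (illustrative), for
//   %Out = call <4 x float> @llvm.x86.sse2.cvtsd2ss(<4 x float> %a,
//                                                   <2 x double> %b)
// NumUsedElements is 1: element 0 of %b must be fully initialized (we
// insert a check), and the result shadow is the shadow of %a with
// element 0 zeroed out.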
2010 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2011 IRBuilder<> IRB(&I);
2012 Value *CopyOp, *ConvertOp;
2014 switch (I.getNumArgOperands()) {
2015 case 2:
2016 CopyOp = I.getArgOperand(0);
2017 ConvertOp = I.getArgOperand(1);
2018 break;
2019 case 1:
2020 ConvertOp = I.getArgOperand(0);
2021 CopyOp = nullptr;
2022 break;
2023 default:
2024 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2025 }
2027 // The first *NumUsedElements* elements of ConvertOp are converted to the
2028 // same number of output elements. The rest of the output is copied from
2029 // CopyOp, or (if not available) filled with zeroes.
2030 // Combine shadow for elements of ConvertOp that are used in this operation,
2031 // and insert a check.
2032 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2033 // int->any conversion.
2034 Value *ConvertShadow = getShadow(ConvertOp);
2035 Value *AggShadow = nullptr;
2036 if (ConvertOp->getType()->isVectorTy()) {
2037 AggShadow = IRB.CreateExtractElement(
2038 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2039 for (int i = 1; i < NumUsedElements; ++i) {
2040 Value *MoreShadow = IRB.CreateExtractElement(
2041 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2042 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2043 }
2044 } else {
2045 AggShadow = ConvertShadow;
2046 }
2047 assert(AggShadow->getType()->isIntegerTy());
2048 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2050 // Build result shadow by zero-filling parts of CopyOp shadow that come from
2051 // ConvertOp.
2052 if (CopyOp) {
2053 assert(CopyOp->getType() == I.getType());
2054 assert(CopyOp->getType()->isVectorTy());
2055 Value *ResultShadow = getShadow(CopyOp);
2056 Type *EltTy = ResultShadow->getType()->getVectorElementType();
2057 for (int i = 0; i < NumUsedElements; ++i) {
2058 ResultShadow = IRB.CreateInsertElement(
2059 ResultShadow, ConstantInt::getNullValue(EltTy),
2060 ConstantInt::get(IRB.getInt32Ty(), i));
2061 }
2062 setShadow(&I, ResultShadow);
2063 setOrigin(&I, getOrigin(CopyOp));
2064 } else {
2065 setShadow(&I, getCleanShadow(&I));
2066 setOrigin(&I, getCleanOrigin());
2067 }
2068 }
2070 // Given a scalar or vector, extract lower 64 bits (or less), and return all
2071 // zeroes if it is zero, and all ones otherwise.
2072 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2073 if (S->getType()->isVectorTy())
2074 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2075 assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2076 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2077 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2078 }
2080 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2081 Type *T = S->getType();
2082 assert(T->isVectorTy());
2083 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2084 return IRB.CreateSExt(S2, T);
2085 }
2087 // \brief Instrument vector shift intrinsic.
2089 // This function instruments intrinsics like int_x86_avx2_psll_w.
2090 // Intrinsic shifts %In by %ShiftSize bits.
2091 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2092 // size, and the rest is ignored. Behavior is defined even if shift size is
2093 // greater than register (or field) width.
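// For example (illustrative), for
//   %r = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %x, <4 x i32> %n)
// we call the same psll.d on the shadow of %x (bitcast as needed) with the
// original %n; if any bit of the low 64 bits of %n's shadow is set, the
// whole result shadow is additionally OR-ed with all-ones.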
2094 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2095 assert(I.getNumArgOperands() == 2);
2096 IRBuilder<> IRB(&I);
2097 // If any of the S2 bits are poisoned, the whole thing is poisoned.
2098 // Otherwise perform the same shift on S1.
2099 Value *S1 = getShadow(&I, 0);
2100 Value *S2 = getShadow(&I, 1);
2101 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2102 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2103 Value *V1 = I.getOperand(0);
2104 Value *V2 = I.getOperand(1);
2105 Value *Shift = IRB.CreateCall2(I.getCalledValue(),
2106 IRB.CreateBitCast(S1, V1->getType()), V2);
2107 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2108 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2109 setOriginForNaryOp(I);
2110 }
2112 // \brief Get an X86_MMX-sized vector type.
2113 Type *getMMXVectorTy(unsigned EltSizeInBits) {
2114 const unsigned X86_MMXSizeInBits = 64;
2115 return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2116 X86_MMXSizeInBits / EltSizeInBits);
2117 }
2119 // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
2120 // intrinsic.
2121 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2122 switch (id) {
2123 case llvm::Intrinsic::x86_sse2_packsswb_128:
2124 case llvm::Intrinsic::x86_sse2_packuswb_128:
2125 return llvm::Intrinsic::x86_sse2_packsswb_128;
2127 case llvm::Intrinsic::x86_sse2_packssdw_128:
2128 case llvm::Intrinsic::x86_sse41_packusdw:
2129 return llvm::Intrinsic::x86_sse2_packssdw_128;
2131 case llvm::Intrinsic::x86_avx2_packsswb:
2132 case llvm::Intrinsic::x86_avx2_packuswb:
2133 return llvm::Intrinsic::x86_avx2_packsswb;
2135 case llvm::Intrinsic::x86_avx2_packssdw:
2136 case llvm::Intrinsic::x86_avx2_packusdw:
2137 return llvm::Intrinsic::x86_avx2_packssdw;
2139 case llvm::Intrinsic::x86_mmx_packsswb:
2140 case llvm::Intrinsic::x86_mmx_packuswb:
2141 return llvm::Intrinsic::x86_mmx_packsswb;
2143 case llvm::Intrinsic::x86_mmx_packssdw:
2144 return llvm::Intrinsic::x86_mmx_packssdw;
2145 default:
2146 llvm_unreachable("unexpected intrinsic id");
2147 }
2148 }
2150 // \brief Instrument vector pack intrinsic.
2152 // This function instruments intrinsics like x86_mmx_packsswb, that
2153 // packs elements of 2 input vectors into half as many bits with saturation.
2154 // Shadow is propagated with the signed variant of the same intrinsic applied
2155 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2156 // EltSizeInBits is used only for x86mmx arguments.
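// For example (illustrative), for packsswb the shadow of each input word is
// first collapsed to all-ones (if any bit is poisoned) or all-zeros, and the
// collapsed words are then packed with packsswb itself: 0xFFFF is -1, which
// saturates to 0xFF, so a poisoned input word yields a fully poisoned output
// byte. The signed variant is essential here; unsigned saturation would
// clamp -1 to 0x00 and lose the poison.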
2157 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2158 assert(I.getNumArgOperands() == 2);
2159 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2160 IRBuilder<> IRB(&I);
2161 Value *S1 = getShadow(&I, 0);
2162 Value *S2 = getShadow(&I, 1);
2163 assert(isX86_MMX || S1->getType()->isVectorTy());
2165 // SExt and ICmpNE below must apply to individual elements of input vectors.
2166 // In case of x86mmx arguments, cast them to appropriate vector types and
2167 // back.
2168 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2169 if (isX86_MMX) {
2170 S1 = IRB.CreateBitCast(S1, T);
2171 S2 = IRB.CreateBitCast(S2, T);
2172 }
2173 Value *S1_ext = IRB.CreateSExt(
2174 IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
2175 Value *S2_ext = IRB.CreateSExt(
2176 IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
2177 if (isX86_MMX) {
2178 Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2179 S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2180 S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2181 }
2183 Function *ShadowFn = Intrinsic::getDeclaration(
2184 F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2186 Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
2187 if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2188 setShadow(&I, S);
2189 setOriginForNaryOp(I);
2190 }
2192 // \brief Instrument sum-of-absolute-differences intrinsic.
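// Illustrative summary of the code below: for psadbw each 64-bit result
// element is a sum of absolute differences of 8 byte pairs and thus fits in
// its low 16 bits; the upper 48 bits are always zero, hence initialized.
// We OR the argument shadows, collapse each result-sized group to
// all-ones/all-zeros, and shift right so that only the low 16 shadow bits
// of each element can be set.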
2193 void handleVectorSadIntrinsic(IntrinsicInst &I) {
2194 const unsigned SignificantBitsPerResultElement = 16;
2195 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2196 Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2197 unsigned ZeroBitsPerResultElement =
2198 ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2200 IRBuilder<> IRB(&I);
2201 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2202 S = IRB.CreateBitCast(S, ResTy);
2203 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2204 ResTy);
2205 S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2206 S = IRB.CreateBitCast(S, getShadowTy(&I));
2207 setShadow(&I, S);
2208 setOriginForNaryOp(I);
2209 }
2211 // \brief Instrument multiply-add intrinsic.
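// For example (illustrative), pmaddwd multiplies pairs of i16 elements and
// adds adjacent products into i32 elements; bitcasting the OR-ed argument
// shadows to the i32 result type merges each pair, and the ICmpNE/SExt
// below turns any poisoned bit in a pair into a fully poisoned result
// element.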
2212 void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2213 unsigned EltSizeInBits = 0) {
2214 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2215 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
2216 IRBuilder<> IRB(&I);
2217 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2218 S = IRB.CreateBitCast(S, ResTy);
2219 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2220 ResTy);
2221 S = IRB.CreateBitCast(S, getShadowTy(&I));
2222 setShadow(&I, S);
2223 setOriginForNaryOp(I);
2224 }
2226 void visitIntrinsicInst(IntrinsicInst &I) {
2227 switch (I.getIntrinsicID()) {
2228 case llvm::Intrinsic::bswap:
2229 handleBswap(I);
2230 break;
2231 case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
2232 case llvm::Intrinsic::x86_avx512_cvtsd2usi:
2233 case llvm::Intrinsic::x86_avx512_cvtss2usi64:
2234 case llvm::Intrinsic::x86_avx512_cvtss2usi:
2235 case llvm::Intrinsic::x86_avx512_cvttss2usi64:
2236 case llvm::Intrinsic::x86_avx512_cvttss2usi:
2237 case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
2238 case llvm::Intrinsic::x86_avx512_cvttsd2usi:
2239 case llvm::Intrinsic::x86_avx512_cvtusi2sd:
2240 case llvm::Intrinsic::x86_avx512_cvtusi2ss:
2241 case llvm::Intrinsic::x86_avx512_cvtusi642sd:
2242 case llvm::Intrinsic::x86_avx512_cvtusi642ss:
2243 case llvm::Intrinsic::x86_sse2_cvtsd2si64:
2244 case llvm::Intrinsic::x86_sse2_cvtsd2si:
2245 case llvm::Intrinsic::x86_sse2_cvtsd2ss:
2246 case llvm::Intrinsic::x86_sse2_cvtsi2sd:
2247 case llvm::Intrinsic::x86_sse2_cvtsi642sd:
2248 case llvm::Intrinsic::x86_sse2_cvtss2sd:
2249 case llvm::Intrinsic::x86_sse2_cvttsd2si64:
2250 case llvm::Intrinsic::x86_sse2_cvttsd2si:
2251 case llvm::Intrinsic::x86_sse_cvtsi2ss:
2252 case llvm::Intrinsic::x86_sse_cvtsi642ss:
2253 case llvm::Intrinsic::x86_sse_cvtss2si64:
2254 case llvm::Intrinsic::x86_sse_cvtss2si:
2255 case llvm::Intrinsic::x86_sse_cvttss2si64:
2256 case llvm::Intrinsic::x86_sse_cvttss2si:
2257 handleVectorConvertIntrinsic(I, 1);
2258 break;
2259 case llvm::Intrinsic::x86_sse2_cvtdq2pd:
2260 case llvm::Intrinsic::x86_sse2_cvtps2pd:
2261 case llvm::Intrinsic::x86_sse_cvtps2pi:
2262 case llvm::Intrinsic::x86_sse_cvttps2pi:
2263 handleVectorConvertIntrinsic(I, 2);
2264 break;
2265 case llvm::Intrinsic::x86_avx512_psll_dq:
2266 case llvm::Intrinsic::x86_avx512_psrl_dq:
2267 case llvm::Intrinsic::x86_avx2_psll_w:
2268 case llvm::Intrinsic::x86_avx2_psll_d:
2269 case llvm::Intrinsic::x86_avx2_psll_q:
2270 case llvm::Intrinsic::x86_avx2_pslli_w:
2271 case llvm::Intrinsic::x86_avx2_pslli_d:
2272 case llvm::Intrinsic::x86_avx2_pslli_q:
2273 case llvm::Intrinsic::x86_avx2_psrl_w:
2274 case llvm::Intrinsic::x86_avx2_psrl_d:
2275 case llvm::Intrinsic::x86_avx2_psrl_q:
2276 case llvm::Intrinsic::x86_avx2_psra_w:
2277 case llvm::Intrinsic::x86_avx2_psra_d:
2278 case llvm::Intrinsic::x86_avx2_psrli_w:
2279 case llvm::Intrinsic::x86_avx2_psrli_d:
2280 case llvm::Intrinsic::x86_avx2_psrli_q:
2281 case llvm::Intrinsic::x86_avx2_psrai_w:
2282 case llvm::Intrinsic::x86_avx2_psrai_d:
2283 case llvm::Intrinsic::x86_sse2_psll_w:
2284 case llvm::Intrinsic::x86_sse2_psll_d:
2285 case llvm::Intrinsic::x86_sse2_psll_q:
2286 case llvm::Intrinsic::x86_sse2_pslli_w:
2287 case llvm::Intrinsic::x86_sse2_pslli_d:
2288 case llvm::Intrinsic::x86_sse2_pslli_q:
2289 case llvm::Intrinsic::x86_sse2_psrl_w:
2290 case llvm::Intrinsic::x86_sse2_psrl_d:
2291 case llvm::Intrinsic::x86_sse2_psrl_q:
2292 case llvm::Intrinsic::x86_sse2_psra_w:
2293 case llvm::Intrinsic::x86_sse2_psra_d:
2294 case llvm::Intrinsic::x86_sse2_psrli_w:
2295 case llvm::Intrinsic::x86_sse2_psrli_d:
2296 case llvm::Intrinsic::x86_sse2_psrli_q:
2297 case llvm::Intrinsic::x86_sse2_psrai_w:
2298 case llvm::Intrinsic::x86_sse2_psrai_d:
2299 case llvm::Intrinsic::x86_mmx_psll_w:
2300 case llvm::Intrinsic::x86_mmx_psll_d:
2301 case llvm::Intrinsic::x86_mmx_psll_q:
2302 case llvm::Intrinsic::x86_mmx_pslli_w:
2303 case llvm::Intrinsic::x86_mmx_pslli_d:
2304 case llvm::Intrinsic::x86_mmx_pslli_q:
2305 case llvm::Intrinsic::x86_mmx_psrl_w:
2306 case llvm::Intrinsic::x86_mmx_psrl_d:
2307 case llvm::Intrinsic::x86_mmx_psrl_q:
2308 case llvm::Intrinsic::x86_mmx_psra_w:
2309 case llvm::Intrinsic::x86_mmx_psra_d:
2310 case llvm::Intrinsic::x86_mmx_psrli_w:
2311 case llvm::Intrinsic::x86_mmx_psrli_d:
2312 case llvm::Intrinsic::x86_mmx_psrli_q:
2313 case llvm::Intrinsic::x86_mmx_psrai_w:
2314 case llvm::Intrinsic::x86_mmx_psrai_d:
2315 handleVectorShiftIntrinsic(I, /* Variable */ false);
2316 break;
2317 case llvm::Intrinsic::x86_avx2_psllv_d:
2318 case llvm::Intrinsic::x86_avx2_psllv_d_256:
2319 case llvm::Intrinsic::x86_avx2_psllv_q:
2320 case llvm::Intrinsic::x86_avx2_psllv_q_256:
2321 case llvm::Intrinsic::x86_avx2_psrlv_d:
2322 case llvm::Intrinsic::x86_avx2_psrlv_d_256:
2323 case llvm::Intrinsic::x86_avx2_psrlv_q:
2324 case llvm::Intrinsic::x86_avx2_psrlv_q_256:
2325 case llvm::Intrinsic::x86_avx2_psrav_d:
2326 case llvm::Intrinsic::x86_avx2_psrav_d_256:
2327 handleVectorShiftIntrinsic(I, /* Variable */ true);
2328 break;
2330 // Byte shifts are not implemented.
2331 // case llvm::Intrinsic::x86_avx512_psll_dq_bs:
2332 // case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
2334 case llvm::Intrinsic::x86_sse2_packsswb_128:
2335 case llvm::Intrinsic::x86_sse2_packssdw_128:
2336 case llvm::Intrinsic::x86_sse2_packuswb_128:
2337 case llvm::Intrinsic::x86_sse41_packusdw:
2338 case llvm::Intrinsic::x86_avx2_packsswb:
2339 case llvm::Intrinsic::x86_avx2_packssdw:
2340 case llvm::Intrinsic::x86_avx2_packuswb:
2341 case llvm::Intrinsic::x86_avx2_packusdw:
2342 handleVectorPackIntrinsic(I);
2343 break;
2345 case llvm::Intrinsic::x86_mmx_packsswb:
2346 case llvm::Intrinsic::x86_mmx_packuswb:
2347 handleVectorPackIntrinsic(I, 16);
2348 break;
2350 case llvm::Intrinsic::x86_mmx_packssdw:
2351 handleVectorPackIntrinsic(I, 32);
2352 break;
2354 case llvm::Intrinsic::x86_mmx_psad_bw:
2355 case llvm::Intrinsic::x86_sse2_psad_bw:
2356 case llvm::Intrinsic::x86_avx2_psad_bw:
2357 handleVectorSadIntrinsic(I);
2358 break;
2360 case llvm::Intrinsic::x86_sse2_pmadd_wd:
2361 case llvm::Intrinsic::x86_avx2_pmadd_wd:
2362 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
2363 case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
2364 handleVectorPmaddIntrinsic(I);
2365 break;
2367 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
2368 handleVectorPmaddIntrinsic(I, 8);
2369 break;
2371 case llvm::Intrinsic::x86_mmx_pmadd_wd:
2372 handleVectorPmaddIntrinsic(I, 16);
2373 break;
2375 default:
2376 if (!handleUnknownIntrinsic(I))
2377 visitInstruction(I);
2378 break;
2379 }
2380 }
2382 void visitCallSite(CallSite CS) {
2383 Instruction &I = *CS.getInstruction();
2384 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
2385 if (CS.isCall()) {
2386 CallInst *Call = cast<CallInst>(&I);
2388 // For inline asm, do the usual thing: check argument shadow and mark all
2389 // outputs as clean. Note that any side effects of the inline asm that are
2390 // not immediately visible in its constraints are not handled.
2391 if (Call->isInlineAsm()) {
2392 visitInstruction(I);
2393 return;
2394 }
2396 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
2398 // We are going to insert code that relies on the fact that the callee
2399 // will become a non-readonly function after it is instrumented by us. To
2400 // prevent this code from being optimized out, mark that function
2401 // non-readonly in advance.
2402 if (Function *Func = Call->getCalledFunction()) {
2403 // Clear out readonly/readnone attributes.
2404 AttrBuilder B;
2405 B.addAttribute(Attribute::ReadOnly)
2406 .addAttribute(Attribute::ReadNone);
2407 Func->removeAttributes(AttributeSet::FunctionIndex,
2408 AttributeSet::get(Func->getContext(),
2409 AttributeSet::FunctionIndex,
2410 B));
2411 }
2412 }
2413 IRBuilder<> IRB(&I);
2415 unsigned ArgOffset = 0;
2416 DEBUG(dbgs() << " CallSite: " << I << "\n");
2417 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2418 ArgIt != End; ++ArgIt) {
2419 Value *A = *ArgIt;
2420 unsigned i = ArgIt - CS.arg_begin();
2421 if (!A->getType()->isSized()) {
2422 DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
2423 continue;
2424 }
2425 unsigned Size = 0;
2426 Value *Store = nullptr;
2427 // Compute the Shadow for arg even if it is ByVal, because
2428 // in that case getShadow() will copy the actual arg shadow to
2429 // __msan_param_tls.
2430 Value *ArgShadow = getShadow(A);
2431 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
2432 DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
2433 " Shadow: " << *ArgShadow << "\n");
2434 bool ArgIsInitialized = false;
2435 if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
2436 assert(A->getType()->isPointerTy() &&
2437 "ByVal argument is not a pointer!");
2438 Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
2439 if (ArgOffset + Size > kParamTLSSize) break;
2440 unsigned ParamAlignment = CS.getParamAlignment(i + 1);
2441 unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
2442 Store = IRB.CreateMemCpy(ArgShadowBase,
2443 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
2444 Size, Alignment);
2445 } else {
2446 Size = MS.DL->getTypeAllocSize(A->getType());
2447 if (ArgOffset + Size > kParamTLSSize) break;
2448 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
2449 kShadowTLSAlignment);
2450 Constant *Cst = dyn_cast<Constant>(ArgShadow);
2451 if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
2452 }
2453 if (MS.TrackOrigins && !ArgIsInitialized)
2454 IRB.CreateStore(getOrigin(A),
2455 getOriginPtrForArgument(A, IRB, ArgOffset));
2456 (void)Store;
2457 assert(Size != 0 && Store != nullptr);
2458 DEBUG(dbgs() << " Param:" << *Store << "\n");
2459 ArgOffset += RoundUpToAlignment(Size, 8);
2460 }
2461 DEBUG(dbgs() << " done with call args\n");
2463 FunctionType *FT =
2464 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
2465 if (FT->isVarArg()) {
2466 VAHelper->visitCallSite(CS, IRB);
2467 }
2469 // Now, get the shadow for the RetVal.
2470 if (!I.getType()->isSized()) return;
2471 IRBuilder<> IRBBefore(&I);
2472 // Until we have full dynamic coverage, make sure the retval shadow is 0.
2473 Value *Base = getShadowPtrForRetval(&I, IRBBefore);
2474 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
2475 Instruction *NextInsn = nullptr;
2476 if (CS.isCall()) {
2477 NextInsn = I.getNextNode();
2478 } else {
2479 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
2480 if (!NormalDest->getSinglePredecessor()) {
2481 // FIXME: this case is tricky, so we are just conservative here.
2482 // Perhaps we need to split the edge between this BB and NormalDest,
2483 // but a naive attempt to use SplitEdge leads to a crash.
2484 setShadow(&I, getCleanShadow(&I));
2485 setOrigin(&I, getCleanOrigin());
2486 return;
2487 }
2488 NextInsn = NormalDest->getFirstInsertionPt();
2489 assert(NextInsn &&
2490 "Could not find insertion point for retval shadow load");
2491 }
2492 IRBuilder<> IRBAfter(NextInsn);
2493 Value *RetvalShadow =
2494 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
2495 kShadowTLSAlignment, "_msret");
2496 setShadow(&I, RetvalShadow);
2497 if (MS.TrackOrigins)
2498 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
2499 }
2501 void visitReturnInst(ReturnInst &I) {
2502 IRBuilder<> IRB(&I);
2503 Value *RetVal = I.getReturnValue();
2504 if (!RetVal) return;
2505 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
2506 if (CheckReturnValue) {
2507 insertShadowCheck(RetVal, &I);
2508 Value *Shadow = getCleanShadow(RetVal);
2509 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2510 } else {
2511 Value *Shadow = getShadow(RetVal);
2512 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2513 // FIXME: make it conditional if ClStoreCleanOrigin==0
2514 if (MS.TrackOrigins)
2515 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
2516 }
2517 }
2519 void visitPHINode(PHINode &I) {
2520 IRBuilder<> IRB(&I);
2521 if (!PropagateShadow) {
2522 setShadow(&I, getCleanShadow(&I));
2523 setOrigin(&I, getCleanOrigin());
2524 return;
2525 }
2527 ShadowPHINodes.push_back(&I);
2528 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
2529 "_msphi_s"));
2530 if (MS.TrackOrigins)
2531 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
2532 "_msphi_o"));
2533 }
2535 void visitAllocaInst(AllocaInst &I) {
2536 setShadow(&I, getCleanShadow(&I));
2537 setOrigin(&I, getCleanOrigin());
2538 IRBuilder<> IRB(I.getNextNode());
2539 uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
2540 if (PoisonStack && ClPoisonStackWithCall) {
2541 IRB.CreateCall2(MS.MsanPoisonStackFn,
2542 IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2543 ConstantInt::get(MS.IntptrTy, Size));
2544 } else {
2545 Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
2546 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
2547 IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
2548 }
2550 if (PoisonStack && MS.TrackOrigins) {
2551 SmallString<2048> StackDescriptionStorage;
2552 raw_svector_ostream StackDescription(StackDescriptionStorage);
2553 // We create a string with a description of the stack allocation and
2554 // pass it into __msan_set_alloca_origin.
2555 // It will be printed by the run-time if stack-originated UMR is found.
2556 // The first 4 bytes of the string are set to '----' and will be replaced
2557 // by the origin id of this allocation at the first call.
2558 StackDescription << "----" << I.getName() << "@" << F.getName();
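// For example (illustrative), an alloca named "x" in function "main"
// produces the description string "----x@main".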
2559 Value *Descr =
2560 createPrivateNonConstGlobalForString(*F.getParent(),
2561 StackDescription.str());
2563 IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
2564 IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2565 ConstantInt::get(MS.IntptrTy, Size),
2566 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
2567 IRB.CreatePointerCast(&F, MS.IntptrTy));
2568 }
2569 }
2571 void visitSelectInst(SelectInst& I) {
2572 IRBuilder<> IRB(&I);
2573 // a = select b, c, d
2574 Value *B = I.getCondition();
2575 Value *C = I.getTrueValue();
2576 Value *D = I.getFalseValue();
2577 Value *Sb = getShadow(B);
2578 Value *Sc = getShadow(C);
2579 Value *Sd = getShadow(D);
2581 // Result shadow if condition shadow is 0.
2582 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
2583 Value *Sa1;
2584 if (I.getType()->isAggregateType()) {
2585 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
2586 // an extra "select". This results in much more compact IR.
2587 // Sa = select Sb, poisoned, (select b, Sc, Sd)
2588 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
2589 } else {
2590 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
2591 // If Sb (condition is poisoned), look for bits in c and d that are equal
2592 // and both unpoisoned.
2593 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
2595 // Cast arguments to shadow-compatible type.
2596 C = CreateAppToShadowCast(IRB, C);
2597 D = CreateAppToShadowCast(IRB, D);
2599 // Result shadow if condition shadow is 1.
2600 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
2601 }
2602 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
2603 setShadow(&I, Sa);
2604 if (MS.TrackOrigins) {
2605 // Origins are always i32, so any vector conditions must be flattened.
2606 // FIXME: consider tracking vector origins for app vectors?
2607 if (B->getType()->isVectorTy()) {
2608 Type *FlatTy = getShadowTyNoVec(B->getType());
2609 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
2610 ConstantInt::getNullValue(FlatTy));
2611 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
2612 ConstantInt::getNullValue(FlatTy));
2613 }
2614 // a = select b, c, d
2615 // Oa = Sb ? Ob : (b ? Oc : Od)
2616 setOrigin(
2617 &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
2618 IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
2619 getOrigin(I.getFalseValue()))));
2620 }
2621 }
2623 void visitLandingPadInst(LandingPadInst &I) {
2624 // Do nothing.
2625 // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
2626 setShadow(&I, getCleanShadow(&I));
2627 setOrigin(&I, getCleanOrigin());
2628 }
2630 void visitGetElementPtrInst(GetElementPtrInst &I) {
2631 handleShadowOr(I);
2632 }
2634 void visitExtractValueInst(ExtractValueInst &I) {
2635 IRBuilder<> IRB(&I);
2636 Value *Agg = I.getAggregateOperand();
2637 DEBUG(dbgs() << "ExtractValue: " << I << "\n");
2638 Value *AggShadow = getShadow(Agg);
2639 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
2640 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2641 DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
2642 setShadow(&I, ResShadow);
2643 setOriginForNaryOp(I);
2644 }
2646 void visitInsertValueInst(InsertValueInst &I) {
2647 IRBuilder<> IRB(&I);
2648 DEBUG(dbgs() << "InsertValue: " << I << "\n");
2649 Value *AggShadow = getShadow(I.getAggregateOperand());
2650 Value *InsShadow = getShadow(I.getInsertedValueOperand());
2651 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
2652 DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
2653 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2654 DEBUG(dbgs() << " Res: " << *Res << "\n");
2655 setShadow(&I, Res);
2656 setOriginForNaryOp(I);
2657 }
2659 void dumpInst(Instruction &I) {
2660 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2661 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
2663 errs() << "ZZZ " << I.getOpcodeName() << "\n";
2665 errs() << "QQQ " << I << "\n";
2668 void visitResumeInst(ResumeInst &I) {
2669 DEBUG(dbgs() << "Resume: " << I << "\n");
2670 // Nothing to do here.
2671 }
2673 void visitInstruction(Instruction &I) {
2674 // Everything else: stop propagating and check for poisoned shadow.
2675 if (ClDumpStrictInstructions)
2676 dumpInst(I);
2677 DEBUG(dbgs() << "DEFAULT: " << I << "\n");
2678 for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
2679 insertShadowCheck(I.getOperand(i), &I);
2680 setShadow(&I, getCleanShadow(&I));
2681 setOrigin(&I, getCleanOrigin());
2682 }
2683 };
2685 /// \brief AMD64-specific implementation of VarArgHelper.
2686 struct VarArgAMD64Helper : public VarArgHelper {
2687 // An unfortunate workaround for asymmetric lowering of va_arg stuff.
2688 // See a comment in visitCallSite for more details.
2689 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
2690 static const unsigned AMD64FpEndOffset = 176;
2692 Function &F;
2693 MemorySanitizer &MS;
2694 MemorySanitizerVisitor &MSV;
2695 Value *VAArgTLSCopy;
2696 Value *VAArgOverflowSize;
2698 SmallVector<CallInst*, 16> VAStartInstrumentationList;
2700 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
2701 MemorySanitizerVisitor &MSV)
2702 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
2703 VAArgOverflowSize(nullptr) {}
2705 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
2707 ArgKind classifyArgument(Value* arg) {
2708 // A very rough approximation of X86_64 argument classification rules.
2709 Type *T = arg->getType();
2710 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
2711 return AK_FloatingPoint;
2712 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
2713 return AK_GeneralPurpose;
2714 if (T->isPointerTy())
2715 return AK_GeneralPurpose;
2716 return AK_Memory;
2717 }
2719 // For VarArg functions, store the argument shadow in an ABI-specific format
2720 // that corresponds to va_list layout.
2721 // We do this because Clang lowers va_arg in the frontend, and this pass
2722 // only sees the low level code that deals with va_list internals.
2723 // A much easier alternative (provided that Clang emits va_arg instructions)
2724 // would have been to associate each live instance of va_list with a copy of
2725 // MSanParamTLS, and extract shadow on va_arg() call in the argument list
2726 // order.
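// For example (illustrative): for a call like printf(fmt, i, d) with an
// int i and a double d, the shadow of fmt goes into the va-arg TLS buffer
// (MS.VAArgTLS) at offset 0 and the shadow of i at offset 8 (general-purpose
// slots, 8 bytes each up to AMD64GpEndOffset = 48), the shadow of d at
// offset 48 (FP slots, 16 bytes each up to AMD64FpEndOffset = 176), and any
// further arguments spill into the overflow area past offset 176.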
2727 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
2728 unsigned GpOffset = 0;
2729 unsigned FpOffset = AMD64GpEndOffset;
2730 unsigned OverflowOffset = AMD64FpEndOffset;
2731 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2732 ArgIt != End; ++ArgIt) {
2733 Value *A = *ArgIt;
2734 unsigned ArgNo = CS.getArgumentNo(ArgIt);
2735 bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
2736 if (IsByVal) {
2737 // ByVal arguments always go to the overflow area.
2738 assert(A->getType()->isPointerTy());
2739 Type *RealTy = A->getType()->getPointerElementType();
2740 uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
2741 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
2742 OverflowOffset += RoundUpToAlignment(ArgSize, 8);
2743 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
2744 ArgSize, kShadowTLSAlignment);
2745 } else {
2746 ArgKind AK = classifyArgument(A);
2747 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
2748 AK = AK_Memory;
2749 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
2750 AK = AK_Memory;
2751 Value *Base;
2752 switch (AK) {
2753 case AK_GeneralPurpose:
2754 Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
2755 GpOffset += 8;
2756 break;
2757 case AK_FloatingPoint:
2758 Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
2759 FpOffset += 16;
2760 break;
2761 case AK_Memory:
2762 uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
2763 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
2764 OverflowOffset += RoundUpToAlignment(ArgSize, 8);
2765 }
2766 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
2767 }
2768 }
2769 Constant *OverflowSize =
2770 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
2771 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
2772 }
2774 /// \brief Compute the shadow address for a given va_arg.
2775 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
2776 int ArgOffset) {
2777 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
2778 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
2779 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
2780 "_msarg");
2781 }
2783 void visitVAStartInst(VAStartInst &I) override {
2784 IRBuilder<> IRB(&I);
2785 VAStartInstrumentationList.push_back(&I);
2786 Value *VAListTag = I.getArgOperand(0);
2787 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2789 // Unpoison the whole __va_list_tag.
2790 // FIXME: magic ABI constants.
2791 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2792 /* size */24, /* alignment */8, false);
2793 }
2795 void visitVACopyInst(VACopyInst &I) override {
2796 IRBuilder<> IRB(&I);
2797 Value *VAListTag = I.getArgOperand(0);
2798 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2800 // Unpoison the whole __va_list_tag.
2801 // FIXME: magic ABI constants.
2802 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2803 /* size */24, /* alignment */8, false);
2804 }
2806 void finalizeInstrumentation() override {
2807 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
2808 "finalizeInstrumentation called twice");
2809 if (!VAStartInstrumentationList.empty()) {
2810 // If there is a va_start in this function, make a backup copy of
2811 // va_arg_tls somewhere in the function entry block.
2812 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
2813 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
2814 Value *CopySize =
2815 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
2816 VAArgOverflowSize);
2817 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
2818 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
2819 }
2821 // Instrument va_start.
2822 // Copy va_list shadow from the backup copy of the TLS contents.
2823 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
2824 CallInst *OrigInst = VAStartInstrumentationList[i];
2825 IRBuilder<> IRB(OrigInst->getNextNode());
2826 Value *VAListTag = OrigInst->getArgOperand(0);
2828 Value *RegSaveAreaPtrPtr =
2829 IRB.CreateIntToPtr(
2830 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
2831 ConstantInt::get(MS.IntptrTy, 16)),
2832 Type::getInt64PtrTy(*MS.C));
2833 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
2834 Value *RegSaveAreaShadowPtr =
2835 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
2836 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
2837 AMD64FpEndOffset, 16);
2839 Value *OverflowArgAreaPtrPtr =
2840 IRB.CreateIntToPtr(
2841 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
2842 ConstantInt::get(MS.IntptrTy, 8)),
2843 Type::getInt64PtrTy(*MS.C));
2844 Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
2845 Value *OverflowArgAreaShadowPtr =
2846 MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
2847 Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
2848 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
2849 }
2850 }
2851 };
2853 /// \brief A no-op implementation of VarArgHelper.
2854 struct VarArgNoOpHelper : public VarArgHelper {
2855 VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
2856 MemorySanitizerVisitor &MSV) {}
2858 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
2860 void visitVAStartInst(VAStartInst &I) override {}
2862 void visitVACopyInst(VACopyInst &I) override {}
2864 void finalizeInstrumentation() override {}
2865 };
2867 VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
2868 MemorySanitizerVisitor &Visitor) {
2869 // VarArg handling is only implemented on AMD64. False positives are possible
2870 // on other platforms.
2871 llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
2872 if (TargetTriple.getArch() == llvm::Triple::x86_64)
2873 return new VarArgAMD64Helper(Func, Msan, Visitor);
2874 else
2875 return new VarArgNoOpHelper(Func, Msan, Visitor);
2876 }
2878 } // namespace
2880 bool MemorySanitizer::runOnFunction(Function &F) {
2881 MemorySanitizerVisitor Visitor(F, *this);
2883 // Clear out readonly/readnone attributes.
2884 AttrBuilder B;
2885 B.addAttribute(Attribute::ReadOnly)
2886 .addAttribute(Attribute::ReadNone);
2887 F.removeAttributes(AttributeSet::FunctionIndex,
2888 AttributeSet::get(F.getContext(),
2889 AttributeSet::FunctionIndex, B));
2891 return Visitor.runOnFunction();
2892 }