1 //===-- DataFlowSanitizer.cpp - dynamic data flow analysis ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific
14 /// class of bugs on its own. Instead, it provides a generic dynamic data flow
15 /// analysis framework to be used by clients to help detect application-specific
16 /// issues within their own code.
18 /// The analysis is based on automatic propagation of data flow labels (also
19 /// known as taint labels) through a program as it performs computation. Each
20 /// byte of application memory is backed by two bytes of shadow memory which
21 /// hold the label. On Linux/x86_64, memory is laid out as follows:
/// +--------------------+ 0x800000000000 (top of memory)
/// | application memory |
/// +--------------------+ 0x700000008000 (kAppAddr)
/// |                    |
/// |       unused       |
/// |                    |
/// +--------------------+ 0x200200000000 (kUnusedAddr)
/// |    union table     |
/// +--------------------+ 0x200000000000 (kUnionTableAddr)
/// |   shadow memory    |
/// +--------------------+ 0x000000010000 (kShadowAddr)
/// | reserved by kernel |
/// +--------------------+ 0x000000000000
37 /// To derive a shadow memory address from an application memory address,
38 /// bits 44-46 are cleared to bring the address into the range
39 /// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
40 /// account for the double byte representation of shadow labels and move the
41 /// address into the shadow memory range. See the function
42 /// DataFlowSanitizer::getShadowAddress below.
44 /// For more information, please refer to the design document:
45 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
47 #include "llvm/Transforms/Instrumentation.h"
48 #include "llvm/ADT/DenseMap.h"
49 #include "llvm/ADT/DenseSet.h"
50 #include "llvm/ADT/DepthFirstIterator.h"
51 #include "llvm/ADT/StringExtras.h"
52 #include "llvm/ADT/Triple.h"
53 #include "llvm/Analysis/ValueTracking.h"
54 #include "llvm/IR/Dominators.h"
55 #include "llvm/IR/DebugInfo.h"
56 #include "llvm/IR/IRBuilder.h"
57 #include "llvm/IR/InlineAsm.h"
58 #include "llvm/IR/InstVisitor.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/MDBuilder.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/Pass.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/SpecialCaseList.h"
66 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
67 #include "llvm/Transforms/Utils/Local.h"
// VMA size definition for architecture that support multiple sizes.
// AArch64 has 3 VMA sizes: 39, 42 and 48.
#ifndef SANITIZER_AARCH64_VMA
# define SANITIZER_AARCH64_VMA 39
#else
# if SANITIZER_AARCH64_VMA != 39 && SANITIZER_AARCH64_VMA != 42
#  error "invalid SANITIZER_AARCH64_VMA size"
# endif
#endif
85 // The -dfsan-preserve-alignment flag controls whether this pass assumes that
86 // alignment requirements provided by the input IR are correct. For example,
87 // if the input IR contains a load with alignment 8, this flag will cause
88 // the shadow load to have alignment 16. This flag is disabled by default as
89 // we have unfortunately encountered too much code (including Clang itself;
90 // see PR14291) which performs misaligned access.
91 static cl::opt<bool> ClPreserveAlignment(
92 "dfsan-preserve-alignment",
93 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
96 // The ABI list files control how shadow parameters are passed. The pass treats
97 // every function labelled "uninstrumented" in the ABI list file as conforming
98 // to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
99 // additional annotations for those functions, a call to one of those functions
100 // will produce a warning message, as the labelling behaviour of the function is
101 // unknown. The other supported annotations are "functional" and "discard",
102 // which are described below under DataFlowSanitizer::WrapperKind.
103 static cl::list<std::string> ClABIListFiles(
105 cl::desc("File listing native ABI functions and how the pass treats them"),
108 // Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
109 // functions (see DataFlowSanitizer::InstrumentedABI below).
110 static cl::opt<bool> ClArgsABI(
112 cl::desc("Use the argument ABI rather than the TLS ABI"),
115 // Controls whether the pass includes or ignores the labels of pointers in load
117 static cl::opt<bool> ClCombinePointerLabelsOnLoad(
118 "dfsan-combine-pointer-labels-on-load",
119 cl::desc("Combine the label of the pointer with the label of the data when "
120 "loading from memory."),
121 cl::Hidden, cl::init(true));
123 // Controls whether the pass includes or ignores the labels of pointers in
124 // stores instructions.
125 static cl::opt<bool> ClCombinePointerLabelsOnStore(
126 "dfsan-combine-pointer-labels-on-store",
127 cl::desc("Combine the label of the pointer with the label of the data when "
128 "storing in memory."),
129 cl::Hidden, cl::init(false));
131 static cl::opt<bool> ClDebugNonzeroLabels(
132 "dfsan-debug-nonzero-labels",
133 cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
134 "load or return with a nonzero label"),
139 StringRef GetGlobalTypeString(const GlobalValue &G) {
140 // Types of GlobalVariables are always pointer types.
141 Type *GType = G.getType()->getElementType();
142 // For now we support blacklisting struct types only.
143 if (StructType *SGType = dyn_cast<StructType>(GType)) {
144 if (!SGType->isLiteral())
145 return SGType->getName();
147 return "<unknown type>";
151 std::unique_ptr<SpecialCaseList> SCL;
156 void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
158 /// Returns whether either this function or its source file are listed in the
160 bool isIn(const Function &F, StringRef Category) const {
161 return isIn(*F.getParent(), Category) ||
162 SCL->inSection("fun", F.getName(), Category);
165 /// Returns whether this global alias is listed in the given category.
167 /// If GA aliases a function, the alias's name is matched as a function name
168 /// would be. Similarly, aliases of globals are matched like globals.
169 bool isIn(const GlobalAlias &GA, StringRef Category) const {
170 if (isIn(*GA.getParent(), Category))
173 if (isa<FunctionType>(GA.getType()->getElementType()))
174 return SCL->inSection("fun", GA.getName(), Category);
176 return SCL->inSection("global", GA.getName(), Category) ||
177 SCL->inSection("type", GetGlobalTypeString(GA), Category);
180 /// Returns whether this module is listed in the given category.
181 bool isIn(const Module &M, StringRef Category) const {
182 return SCL->inSection("src", M.getModuleIdentifier(), Category);
186 class DataFlowSanitizer : public ModulePass {
187 friend struct DFSanFunction;
188 friend class DFSanVisitor;
194 /// Which ABI should be used for instrumented functions?
195 enum InstrumentedABI {
196 /// Argument and return value labels are passed through additional
197 /// arguments and by modifying the return type.
200 /// Argument and return value labels are passed through TLS variables
201 /// __dfsan_arg_tls and __dfsan_retval_tls.
205 /// How should calls to uninstrumented functions be handled?
207 /// This function is present in an uninstrumented form but we don't know
208 /// how it should be handled. Print a warning and call the function anyway.
209 /// Don't label the return value.
212 /// This function does not write to (user-accessible) memory, and its return
213 /// value is unlabelled.
216 /// This function does not write to (user-accessible) memory, and the label
217 /// of its return value is the union of the label of its arguments.
220 /// Instead of calling the function, a custom wrapper __dfsw_F is called,
221 /// where F is the name of the function. This function may wrap the
222 /// original function or provide its own implementation. This is similar to
223 /// the IA_Args ABI, except that IA_Args uses a struct return type to
224 /// pass the return value shadow in a register, while WK_Custom uses an
225 /// extra pointer argument to return the shadow. This allows the wrapped
226 /// form of the function type to be expressed in C.
232 IntegerType *ShadowTy;
233 PointerType *ShadowPtrTy;
234 IntegerType *IntptrTy;
235 ConstantInt *ZeroShadow;
236 ConstantInt *ShadowPtrMask;
237 ConstantInt *ShadowPtrMul;
240 void *(*GetArgTLSPtr)();
241 void *(*GetRetvalTLSPtr)();
243 Constant *GetRetvalTLS;
244 FunctionType *DFSanUnionFnTy;
245 FunctionType *DFSanUnionLoadFnTy;
246 FunctionType *DFSanUnimplementedFnTy;
247 FunctionType *DFSanSetLabelFnTy;
248 FunctionType *DFSanNonzeroLabelFnTy;
249 FunctionType *DFSanVarargWrapperFnTy;
250 Constant *DFSanUnionFn;
251 Constant *DFSanCheckedUnionFn;
252 Constant *DFSanUnionLoadFn;
253 Constant *DFSanUnimplementedFn;
254 Constant *DFSanSetLabelFn;
255 Constant *DFSanNonzeroLabelFn;
256 Constant *DFSanVarargWrapperFn;
257 MDNode *ColdCallWeights;
258 DFSanABIList ABIList;
259 DenseMap<Value *, Function *> UnwrappedFnMap;
260 AttributeSet ReadOnlyNoneAttrs;
261 DenseMap<const Function *, DISubprogram *> FunctionDIs;
263 Value *getShadowAddress(Value *Addr, Instruction *Pos);
264 bool isInstrumented(const Function *F);
265 bool isInstrumented(const GlobalAlias *GA);
266 FunctionType *getArgsFunctionType(FunctionType *T);
267 FunctionType *getTrampolineFunctionType(FunctionType *T);
268 FunctionType *getCustomFunctionType(FunctionType *T);
269 InstrumentedABI getInstrumentedABI();
270 WrapperKind getWrapperKind(Function *F);
271 void addGlobalNamePrefix(GlobalValue *GV);
272 Function *buildWrapperFunction(Function *F, StringRef NewFName,
273 GlobalValue::LinkageTypes NewFLink,
274 FunctionType *NewFT);
275 Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
279 const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
280 void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
282 bool doInitialization(Module &M) override;
283 bool runOnModule(Module &M) override;
286 struct DFSanFunction {
287 DataFlowSanitizer &DFS;
290 DataFlowSanitizer::InstrumentedABI IA;
294 AllocaInst *LabelReturnAlloca;
295 DenseMap<Value *, Value *> ValShadowMap;
296 DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
297 std::vector<std::pair<PHINode *, PHINode *> > PHIFixups;
298 DenseSet<Instruction *> SkipInsts;
299 std::vector<Value *> NonZeroChecks;
302 struct CachedCombinedShadow {
306 DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
307 CachedCombinedShadows;
308 DenseMap<Value *, std::set<Value *>> ShadowElements;
310 DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
311 : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()),
312 IsNativeABI(IsNativeABI), ArgTLSPtr(nullptr), RetvalTLSPtr(nullptr),
313 LabelReturnAlloca(nullptr) {
315 // FIXME: Need to track down the register allocator issue which causes poor
316 // performance in pathological cases with large numbers of basic blocks.
317 AvoidNewBlocks = F->size() > 1000;
319 Value *getArgTLSPtr();
320 Value *getArgTLS(unsigned Index, Instruction *Pos);
321 Value *getRetvalTLS();
322 Value *getShadow(Value *V);
323 void setShadow(Instruction *I, Value *Shadow);
324 Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
325 Value *combineOperandShadows(Instruction *Inst);
326 Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
328 void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow,
332 class DFSanVisitor : public InstVisitor<DFSanVisitor> {
335 DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}
337 void visitOperandShadowInst(Instruction &I);
339 void visitBinaryOperator(BinaryOperator &BO);
340 void visitCastInst(CastInst &CI);
341 void visitCmpInst(CmpInst &CI);
342 void visitGetElementPtrInst(GetElementPtrInst &GEPI);
343 void visitLoadInst(LoadInst &LI);
344 void visitStoreInst(StoreInst &SI);
345 void visitReturnInst(ReturnInst &RI);
346 void visitCallSite(CallSite CS);
347 void visitPHINode(PHINode &PN);
348 void visitExtractElementInst(ExtractElementInst &I);
349 void visitInsertElementInst(InsertElementInst &I);
350 void visitShuffleVectorInst(ShuffleVectorInst &I);
351 void visitExtractValueInst(ExtractValueInst &I);
352 void visitInsertValueInst(InsertValueInst &I);
353 void visitAllocaInst(AllocaInst &I);
354 void visitSelectInst(SelectInst &I);
355 void visitMemSetInst(MemSetInst &I);
356 void visitMemTransferInst(MemTransferInst &I);
361 char DataFlowSanitizer::ID;
362 INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
363 "DataFlowSanitizer: dynamic data flow analysis.", false, false)
366 llvm::createDataFlowSanitizerPass(const std::vector<std::string> &ABIListFiles,
367 void *(*getArgTLS)(),
368 void *(*getRetValTLS)()) {
369 return new DataFlowSanitizer(ABIListFiles, getArgTLS, getRetValTLS);
372 DataFlowSanitizer::DataFlowSanitizer(
373 const std::vector<std::string> &ABIListFiles, void *(*getArgTLS)(),
374 void *(*getRetValTLS)())
375 : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS) {
376 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
377 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(),
378 ClABIListFiles.end());
379 ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles));
382 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
383 llvm::SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
384 ArgTypes.append(T->getNumParams(), ShadowTy);
386 ArgTypes.push_back(ShadowPtrTy);
387 Type *RetType = T->getReturnType();
388 if (!RetType->isVoidTy())
389 RetType = StructType::get(RetType, ShadowTy, (Type *)nullptr);
390 return FunctionType::get(RetType, ArgTypes, T->isVarArg());
393 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
394 assert(!T->isVarArg());
395 llvm::SmallVector<Type *, 4> ArgTypes;
396 ArgTypes.push_back(T->getPointerTo());
397 ArgTypes.append(T->param_begin(), T->param_end());
398 ArgTypes.append(T->getNumParams(), ShadowTy);
399 Type *RetType = T->getReturnType();
400 if (!RetType->isVoidTy())
401 ArgTypes.push_back(ShadowPtrTy);
402 return FunctionType::get(T->getReturnType(), ArgTypes, false);
405 FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
406 llvm::SmallVector<Type *, 4> ArgTypes;
407 for (FunctionType::param_iterator i = T->param_begin(), e = T->param_end();
410 if (isa<PointerType>(*i) && (FT = dyn_cast<FunctionType>(cast<PointerType>(
411 *i)->getElementType()))) {
412 ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
413 ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
415 ArgTypes.push_back(*i);
418 for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
419 ArgTypes.push_back(ShadowTy);
421 ArgTypes.push_back(ShadowPtrTy);
422 Type *RetType = T->getReturnType();
423 if (!RetType->isVoidTy())
424 ArgTypes.push_back(ShadowPtrTy);
425 return FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg());
428 bool DataFlowSanitizer::doInitialization(Module &M) {
429 llvm::Triple TargetTriple(M.getTargetTriple());
430 bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
431 bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
432 TargetTriple.getArch() == llvm::Triple::mips64el;
433 bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64 ||
434 TargetTriple.getArch() == llvm::Triple::aarch64_be;
436 const DataLayout &DL = M.getDataLayout();
439 Ctx = &M.getContext();
440 ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
441 ShadowPtrTy = PointerType::getUnqual(ShadowTy);
442 IntptrTy = DL.getIntPtrType(*Ctx);
443 ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
444 ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
446 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
448 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
450 #if SANITIZER_AARCH64_VMA == 39
451 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x7800000000LL);
453 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x3c000000000LL);
456 report_fatal_error("unsupported triple");
458 Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy };
460 FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false);
461 Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy };
463 FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false);
464 DFSanUnimplementedFnTy = FunctionType::get(
465 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
466 Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy };
467 DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
468 DFSanSetLabelArgs, /*isVarArg=*/false);
469 DFSanNonzeroLabelFnTy = FunctionType::get(
470 Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
471 DFSanVarargWrapperFnTy = FunctionType::get(
472 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
475 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
477 GetArgTLS = ConstantExpr::getIntToPtr(
478 ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)),
479 PointerType::getUnqual(
480 FunctionType::get(PointerType::getUnqual(ArgTLSTy),
483 if (GetRetvalTLSPtr) {
485 GetRetvalTLS = ConstantExpr::getIntToPtr(
486 ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)),
487 PointerType::getUnqual(
488 FunctionType::get(PointerType::getUnqual(ShadowTy),
492 ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
496 bool DataFlowSanitizer::isInstrumented(const Function *F) {
497 return !ABIList.isIn(*F, "uninstrumented");
500 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
501 return !ABIList.isIn(*GA, "uninstrumented");
504 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
505 return ClArgsABI ? IA_Args : IA_TLS;
508 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
509 if (ABIList.isIn(*F, "functional"))
510 return WK_Functional;
511 if (ABIList.isIn(*F, "discard"))
513 if (ABIList.isIn(*F, "custom"))
519 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
520 std::string GVName = GV->getName(), Prefix = "dfs$";
521 GV->setName(Prefix + GVName);
523 // Try to change the name of the function in module inline asm. We only do
524 // this for specific asm directives, currently only ".symver", to try to avoid
525 // corrupting asm which happens to contain the symbol name as a substring.
526 // Note that the substitution for .symver assumes that the versioned symbol
527 // also has an instrumented name.
528 std::string Asm = GV->getParent()->getModuleInlineAsm();
529 std::string SearchStr = ".symver " + GVName + ",";
530 size_t Pos = Asm.find(SearchStr);
531 if (Pos != std::string::npos) {
532 Asm.replace(Pos, SearchStr.size(),
533 ".symver " + Prefix + GVName + "," + Prefix);
534 GV->getParent()->setModuleInlineAsm(Asm);
539 DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
540 GlobalValue::LinkageTypes NewFLink,
541 FunctionType *NewFT) {
542 FunctionType *FT = F->getFunctionType();
543 Function *NewF = Function::Create(NewFT, NewFLink, NewFName,
545 NewF->copyAttributesFrom(F);
546 NewF->removeAttributes(
547 AttributeSet::ReturnIndex,
548 AttributeSet::get(F->getContext(), AttributeSet::ReturnIndex,
549 AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
551 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
553 NewF->removeAttributes(
554 AttributeSet::FunctionIndex,
555 AttributeSet().addAttribute(*Ctx, AttributeSet::FunctionIndex,
557 CallInst::Create(DFSanVarargWrapperFn,
558 IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
560 new UnreachableInst(*Ctx, BB);
562 std::vector<Value *> Args;
563 unsigned n = FT->getNumParams();
564 for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
565 Args.push_back(&*ai);
566 CallInst *CI = CallInst::Create(F, Args, "", BB);
567 if (FT->getReturnType()->isVoidTy())
568 ReturnInst::Create(*Ctx, BB);
570 ReturnInst::Create(*Ctx, CI, BB);
576 Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
578 FunctionType *FTT = getTrampolineFunctionType(FT);
579 Constant *C = Mod->getOrInsertFunction(FName, FTT);
580 Function *F = dyn_cast<Function>(C);
581 if (F && F->isDeclaration()) {
582 F->setLinkage(GlobalValue::LinkOnceODRLinkage);
583 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
584 std::vector<Value *> Args;
585 Function::arg_iterator AI = F->arg_begin(); ++AI;
586 for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
587 Args.push_back(&*AI);
589 CallInst::Create(&F->getArgumentList().front(), Args, "", BB);
591 if (FT->getReturnType()->isVoidTy())
592 RI = ReturnInst::Create(*Ctx, BB);
594 RI = ReturnInst::Create(*Ctx, CI, BB);
596 DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
597 Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
598 for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
599 DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
600 DFSanVisitor(DFSF).visitCallInst(*CI);
601 if (!FT->getReturnType()->isVoidTy())
602 new StoreInst(DFSF.getShadow(RI->getReturnValue()),
603 &F->getArgumentList().back(), RI);
609 bool DataFlowSanitizer::runOnModule(Module &M) {
610 if (ABIList.isIn(M, "skip"))
613 FunctionDIs = makeSubprogramMap(M);
616 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
617 ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
618 if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS))
619 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
621 if (!GetRetvalTLSPtr) {
622 RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
623 if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS))
624 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
627 DFSanUnionFn = Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy);
628 if (Function *F = dyn_cast<Function>(DFSanUnionFn)) {
629 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
630 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
631 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
632 F->addAttribute(1, Attribute::ZExt);
633 F->addAttribute(2, Attribute::ZExt);
635 DFSanCheckedUnionFn = Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy);
636 if (Function *F = dyn_cast<Function>(DFSanCheckedUnionFn)) {
637 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
638 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
639 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
640 F->addAttribute(1, Attribute::ZExt);
641 F->addAttribute(2, Attribute::ZExt);
644 Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy);
645 if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) {
646 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
647 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
648 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
650 DFSanUnimplementedFn =
651 Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
653 Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy);
654 if (Function *F = dyn_cast<Function>(DFSanSetLabelFn)) {
655 F->addAttribute(1, Attribute::ZExt);
657 DFSanNonzeroLabelFn =
658 Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
659 DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
660 DFSanVarargWrapperFnTy);
662 std::vector<Function *> FnsToInstrument;
663 llvm::SmallPtrSet<Function *, 2> FnsWithNativeABI;
664 for (Function &i : M) {
665 if (!i.isIntrinsic() &&
666 &i != DFSanUnionFn &&
667 &i != DFSanCheckedUnionFn &&
668 &i != DFSanUnionLoadFn &&
669 &i != DFSanUnimplementedFn &&
670 &i != DFSanSetLabelFn &&
671 &i != DFSanNonzeroLabelFn &&
672 &i != DFSanVarargWrapperFn)
673 FnsToInstrument.push_back(&i);
676 // Give function aliases prefixes when necessary, and build wrappers where the
677 // instrumentedness is inconsistent.
678 for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
679 GlobalAlias *GA = &*i;
681 // Don't stop on weak. We assume people aren't playing games with the
682 // instrumentedness of overridden weak aliases.
683 if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
684 bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
685 if (GAInst && FInst) {
686 addGlobalNamePrefix(GA);
687 } else if (GAInst != FInst) {
688 // Non-instrumented alias of an instrumented function, or vice versa.
689 // Replace the alias with a native-ABI wrapper of the aliasee. The pass
690 // below will take care of instrumenting it.
692 buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
693 GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
695 GA->eraseFromParent();
696 FnsToInstrument.push_back(NewF);
702 B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
703 ReadOnlyNoneAttrs = AttributeSet::get(*Ctx, AttributeSet::FunctionIndex, B);
705 // First, change the ABI of every function in the module. ABI-listed
706 // functions keep their original ABI and get a wrapper function.
707 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
708 e = FnsToInstrument.end();
711 FunctionType *FT = F.getFunctionType();
713 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
714 FT->getReturnType()->isVoidTy());
716 if (isInstrumented(&F)) {
717 // Instrumented functions get a 'dfs$' prefix. This allows us to more
718 // easily identify cases of mismatching ABIs.
719 if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
720 FunctionType *NewFT = getArgsFunctionType(FT);
721 Function *NewF = Function::Create(NewFT, F.getLinkage(), "", &M);
722 NewF->copyAttributesFrom(&F);
723 NewF->removeAttributes(
724 AttributeSet::ReturnIndex,
725 AttributeSet::get(NewF->getContext(), AttributeSet::ReturnIndex,
726 AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
727 for (Function::arg_iterator FArg = F.arg_begin(),
728 NewFArg = NewF->arg_begin(),
729 FArgEnd = F.arg_end();
730 FArg != FArgEnd; ++FArg, ++NewFArg) {
731 FArg->replaceAllUsesWith(&*NewFArg);
733 NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
735 for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
737 BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
740 BA->replaceAllUsesWith(
741 BlockAddress::get(NewF, BA->getBasicBlock()));
745 F.replaceAllUsesWith(
746 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
750 addGlobalNamePrefix(NewF);
752 addGlobalNamePrefix(&F);
754 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
755 // Build a wrapper function for F. The wrapper simply calls F, and is
756 // added to FnsToInstrument so that any instrumentation according to its
757 // WrapperKind is done in the second pass below.
758 FunctionType *NewFT = getInstrumentedABI() == IA_Args
759 ? getArgsFunctionType(FT)
761 Function *NewF = buildWrapperFunction(
762 &F, std::string("dfsw$") + std::string(F.getName()),
763 GlobalValue::LinkOnceODRLinkage, NewFT);
764 if (getInstrumentedABI() == IA_TLS)
765 NewF->removeAttributes(AttributeSet::FunctionIndex, ReadOnlyNoneAttrs);
767 Value *WrappedFnCst =
768 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
769 F.replaceAllUsesWith(WrappedFnCst);
771 // Patch the pointer to LLVM function in debug info descriptor.
772 auto DI = FunctionDIs.find(&F);
773 if (DI != FunctionDIs.end())
774 DI->second->replaceFunction(&F);
776 UnwrappedFnMap[WrappedFnCst] = &F;
779 if (!F.isDeclaration()) {
780 // This function is probably defining an interposition of an
781 // uninstrumented function and hence needs to keep the original ABI.
782 // But any functions it may call need to use the instrumented ABI, so
783 // we instrument it in a mode which preserves the original ABI.
784 FnsWithNativeABI.insert(&F);
786 // This code needs to rebuild the iterators, as they may be invalidated
787 // by the push_back, taking care that the new range does not include
788 // any functions added by this code.
789 size_t N = i - FnsToInstrument.begin(),
790 Count = e - FnsToInstrument.begin();
791 FnsToInstrument.push_back(&F);
792 i = FnsToInstrument.begin() + N;
793 e = FnsToInstrument.begin() + Count;
795 // Hopefully, nobody will try to indirectly call a vararg
797 } else if (FT->isVarArg()) {
798 UnwrappedFnMap[&F] = &F;
803 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
804 e = FnsToInstrument.end();
806 if (!*i || (*i)->isDeclaration())
809 removeUnreachableBlocks(**i);
811 DFSanFunction DFSF(*this, *i, FnsWithNativeABI.count(*i));
813 // DFSanVisitor may create new basic blocks, which confuses df_iterator.
814 // Build a copy of the list before iterating over it.
815 llvm::SmallVector<BasicBlock *, 4> BBList(
816 depth_first(&(*i)->getEntryBlock()));
818 for (llvm::SmallVector<BasicBlock *, 4>::iterator i = BBList.begin(),
821 Instruction *Inst = &(*i)->front();
823 // DFSanVisitor may split the current basic block, changing the current
824 // instruction's next pointer and moving the next instruction to the
825 // tail block from which we should continue.
826 Instruction *Next = Inst->getNextNode();
827 // DFSanVisitor may delete Inst, so keep track of whether it was a
829 bool IsTerminator = isa<TerminatorInst>(Inst);
830 if (!DFSF.SkipInsts.count(Inst))
831 DFSanVisitor(DFSF).visit(Inst);
838 // We will not necessarily be able to compute the shadow for every phi node
839 // until we have visited every block. Therefore, the code that handles phi
840 // nodes adds them to the PHIFixups list so that they can be properly
842 for (std::vector<std::pair<PHINode *, PHINode *> >::iterator
843 i = DFSF.PHIFixups.begin(),
844 e = DFSF.PHIFixups.end();
846 for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
848 i->second->setIncomingValue(
849 val, DFSF.getShadow(i->first->getIncomingValue(val)));
853 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
854 // places (i.e. instructions in basic blocks we haven't even begun visiting
855 // yet). To make our life easier, do this work in a pass after the main
857 if (ClDebugNonzeroLabels) {
858 for (Value *V : DFSF.NonZeroChecks) {
860 if (Instruction *I = dyn_cast<Instruction>(V))
861 Pos = I->getNextNode();
863 Pos = &DFSF.F->getEntryBlock().front();
864 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
865 Pos = Pos->getNextNode();
866 IRBuilder<> IRB(Pos);
867 Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
868 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
869 Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
870 IRBuilder<> ThenIRB(BI);
871 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
879 Value *DFSanFunction::getArgTLSPtr() {
883 return ArgTLSPtr = DFS.ArgTLS;
885 IRBuilder<> IRB(&F->getEntryBlock().front());
886 return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS, {});
889 Value *DFSanFunction::getRetvalTLS() {
893 return RetvalTLSPtr = DFS.RetvalTLS;
895 IRBuilder<> IRB(&F->getEntryBlock().front());
896 return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS, {});
899 Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
900 IRBuilder<> IRB(Pos);
901 return IRB.CreateConstGEP2_64(getArgTLSPtr(), 0, Idx);
904 Value *DFSanFunction::getShadow(Value *V) {
// Returns the shadow (taint label) value for V, creating and caching it on
// first use.  Constants and other non-argument, non-instruction values
// always carry the zero label.
905 if (!isa<Argument>(V) && !isa<Instruction>(V))
906 return DFS.ZeroShadow;
// Reference into the cache; filled in lazily below on a miss.
907 Value *&Shadow = ValShadowMap[V];
909 if (Argument *A = dyn_cast<Argument>(V)) {
// Native-ABI functions receive no shadow for their arguments.
911 return DFS.ZeroShadow;
// NOTE(review): the switch header on the instrumented ABI is elided in
// this view; the cases below materialize argument shadows per ABI.
913 case DataFlowSanitizer::IA_TLS: {
// TLS ABI: load the argument's label from its arg-TLS slot.  Insert at
// function entry when the TLS global is static, otherwise just after
// the getter call that produced ArgTLSPtr.
914 Value *ArgTLSPtr = getArgTLSPtr();
915 Instruction *ArgTLSPos =
916 DFS.ArgTLS ? &*F->getEntryBlock().begin()
917 : cast<Instruction>(ArgTLSPtr)->getNextNode();
918 IRBuilder<> IRB(ArgTLSPos);
919 Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos));
922 case DataFlowSanitizer::IA_Args: {
// Args ABI: shadows are passed as extra trailing parameters; the shadow
// of argument k is the (k + NumParams/2)-th formal argument.
923 unsigned ArgIdx = A->getArgNo() + F->getArgumentList().size() / 2;
924 Function::arg_iterator i = F->arg_begin();
// NOTE(review): the iterator advance and Shadow assignment are elided
// in this view.
928 assert(Shadow->getType() == DFS.ShadowTy);
// Freshly materialized argument shadows are candidates for the
// -dfsan-debug-nonzero-labels check.
932 NonZeroChecks.push_back(Shadow);
// Fallback: instructions not yet visited default to the zero label.
934 Shadow = DFS.ZeroShadow;
940 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
// Records Shadow as the label for instruction I.  Each instruction may be
// assigned a shadow exactly once, and it must have the canonical shadow type.
941 assert(!ValShadowMap.count(I));
942 assert(Shadow->getType() == DFS.ShadowTy);
943 ValShadowMap[I] = Shadow;
946 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
// Maps an application address to its shadow address (see the memory-layout
// comment at the top of this file): mask the address with ShadowPtrMask to
// bring it into the shadow range, then convert back to a pointer.
// NOTE(review): the shift-left-by-1 accounting for 2-byte shadow labels
// described in the file header is on lines elided from this view; confirm.
947 assert(Addr != RetvalTLS && "Reinstrumenting?");
948 IRBuilder<> IRB(Pos);
949 return IRB.CreateIntToPtr(
951 IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy), ShadowPtrMask),
956 // Generates IR to compute the union of the two given shadows, inserting it
957 // before Pos. Returns the computed union Value.
958 Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
// Unioning with the zero label is a no-op.
959 if (V1 == DFS.ZeroShadow)
961 if (V2 == DFS.ZeroShadow)
// If we have already tracked the element sets of these shadows, a union may
// be redundant: one side may subsume the other.
966 auto V1Elems = ShadowElements.find(V1);
967 auto V2Elems = ShadowElements.find(V2);
968 if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
969 if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
970 V2Elems->second.begin(), V2Elems->second.end())) {
// V1's element set covers V2's -- V1 is already the union.
972 } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
973 V1Elems->second.begin(), V1Elems->second.end())) {
// Symmetric case: V2 covers V1.
976 } else if (V1Elems != ShadowElements.end()) {
977 if (V1Elems->second.count(V2))
979 } else if (V2Elems != ShadowElements.end()) {
980 if (V2Elems->second.count(V1))
// Canonicalize the (V1, V2) cache key so the union is commutative, then
// reuse a previously computed union if it dominates Pos.
984 auto Key = std::make_pair(V1, V2);
986 std::swap(Key.first, Key.second);
987 CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
988 if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
991 IRBuilder<> IRB(Pos);
992 if (AvoidNewBlocks) {
// Straight-line variant: call __dfsan_checked_union without splitting the
// CFG.  Labels are zero-extended at the ABI boundary.
993 CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
994 Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
995 Call->addAttribute(1, Attribute::ZExt);
996 Call->addAttribute(2, Attribute::ZExt);
998 CCS.Block = Pos->getParent();
// Branchy variant: only call __dfsan_union when the labels differ, via a
// cold conditional block, and merge the results with a phi.
1001 BasicBlock *Head = Pos->getParent();
1002 Value *Ne = IRB.CreateICmpNE(V1, V2);
1003 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1004 Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
1005 IRBuilder<> ThenIRB(BI);
1006 CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
1007 Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1008 Call->addAttribute(1, Attribute::ZExt);
1009 Call->addAttribute(2, Attribute::ZExt);
1011 BasicBlock *Tail = BI->getSuccessor(0);
1012 PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1013 Phi->addIncoming(Call, Call->getParent());
// When V1 == V2 the union is just V1; it flows in from the head block.
1014 Phi->addIncoming(V1, Head);
// Track the element set of the new union shadow so future combines can be
// elided (see the subsumption checks above).
1020 std::set<Value *> UnionElems;
1021 if (V1Elems != ShadowElements.end()) {
1022 UnionElems = V1Elems->second;
1024 UnionElems.insert(V1);
1026 if (V2Elems != ShadowElements.end()) {
1027 UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
1029 UnionElems.insert(V2);
1031 ShadowElements[CCS.Shadow] = std::move(UnionElems);
1036 // A convenience function which folds the shadows of each of the operands
1037 // of the provided instruction Inst, inserting the IR before Inst. Returns
1038 // the computed union Value.
1039 Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
// An instruction with no operands can carry no taint.
1040 if (Inst->getNumOperands() == 0)
1041 return DFS.ZeroShadow;
// Left fold of combineShadows over all operand shadows.
1043 Value *Shadow = getShadow(Inst->getOperand(0));
1044 for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
1045 Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
1050 void DFSanVisitor::visitOperandShadowInst(Instruction &I) {
// Default propagation rule: an instruction's label is the union of its
// operands' labels.
1051 Value *CombinedShadow = DFSF.combineOperandShadows(&I);
1052 DFSF.setShadow(&I, CombinedShadow);
1055 // Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
1056 // Addr has alignment Align, and take the union of each of those shadows.
1057 Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
// Fast path: loads from an alloca whose shadow we track in a parallel
// alloca (see visitAllocaInst) read that single shadow cell directly.
1059 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1060 llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i =
1061 AllocaShadowMap.find(AI);
1062 if (i != AllocaShadowMap.end()) {
1063 IRBuilder<> IRB(Pos);
1064 return IRB.CreateLoad(i->second);
// Shadow memory holds ShadowWidth bits per application byte, so the shadow
// alignment scales accordingly.
1068 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
// If every object Addr may point to is constant (functions, block
// addresses, constant globals), the loaded bytes cannot carry taint.
1069 SmallVector<Value *, 2> Objs;
1070 GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
1071 bool AllConstants = true;
1072 for (SmallVector<Value *, 2>::iterator i = Objs.begin(), e = Objs.end();
1074 if (isa<Function>(*i) || isa<BlockAddress>(*i))
1076 if (isa<GlobalVariable>(*i) && cast<GlobalVariable>(*i)->isConstant())
1079 AllConstants = false;
1083 return DFS.ZeroShadow;
1085 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
// NOTE(review): the switch over small Size values (0/1/2) is elided in
// this view; the cases below are its bodies.  Confirm against full source.
// Size == 0: nothing to load.
1088 return DFS.ZeroShadow;
// Size == 1: a single shadow cell.
1090 LoadInst *LI = new LoadInst(ShadowAddr, "", Pos);
1091 LI->setAlignment(ShadowAlign);
// Size == 2: load both cells and union them.
1095 IRBuilder<> IRB(Pos);
1096 Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
1097 ConstantInt::get(DFS.IntptrTy, 1));
1098 return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
1099 IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
1102 if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
1103 // Fast path for the common case where each byte has identical shadow: load
1104 // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
1105 // shadow is non-equal.
1106 BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
1107 IRBuilder<> FallbackIRB(FallbackBB);
1108 CallInst *FallbackCall = FallbackIRB.CreateCall(
1109 DFS.DFSanUnionLoadFn,
1110 {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1111 FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1113 // Compare each of the shadows stored in the loaded 64 bits to each other,
1114 // by computing (WideShadow rotl ShadowWidth) == WideShadow.
1115 IRBuilder<> IRB(Pos);
1117 IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
1118 Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign);
1119 Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
// Rotate-left built from shl/lshr/or (LLVM IR has no rotate instruction
// at this point).
1120 Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
1121 Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
1122 Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
1123 Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
// Split the block at Pos and keep the dominator tree up to date: Tail
// inherits Head's children in the dominator tree.
1125 BasicBlock *Head = Pos->getParent();
1126 BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());
1128 if (DomTreeNode *OldNode = DT.getNode(Head)) {
1129 std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
1131 DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
1132 for (auto Child : Children)
1133 DT.changeImmediateDominator(Child, NewNode);
1136 // In the following code LastBr will refer to the previous basic block's
1137 // conditional branch instruction, whose true successor is fixed up to point
1138 // to the next block during the loop below or to the tail after the final
// Both successors start as FallbackBB; the true edge is retargeted later.
1140 BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
1141 ReplaceInstWithInst(Head->getTerminator(), LastBr);
1142 DT.addNewBlock(FallbackBB, Head);
// One comparison block per additional 64-bit chunk of shadow.
1144 for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
1145 Ofs += 64 / DFS.ShadowWidth) {
1146 BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
1147 DT.addNewBlock(NextBB, LastBr->getParent());
1148 IRBuilder<> NextIRB(NextBB);
1149 WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
1150 ConstantInt::get(DFS.IntptrTy, 1));
1151 Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
1152 ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
1153 LastBr->setSuccessor(0, NextBB);
1154 LastBr->setSuccessor(0, Tail);
1157 LastBr->setSuccessor(0, Tail);
1158 FallbackIRB.CreateBr(Tail);
// Merge: the uniform shadow from the fast path, or the runtime union from
// the fallback call.
1159 PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1160 Shadow->addIncoming(FallbackCall, FallbackBB);
1161 Shadow->addIncoming(TruncShadow, LastBr->getParent());
// General case: delegate the whole range to __dfsan_union_load.
1165 IRBuilder<> IRB(Pos);
1166 CallInst *FallbackCall = IRB.CreateCall(
1167 DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1168 FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1169 return FallbackCall;
1172 void DFSanVisitor::visitLoadInst(LoadInst &LI) {
// Computes the shadow of a load: the union of the shadows of the loaded
// bytes, optionally combined with the pointer operand's shadow.
1173 auto &DL = LI.getModule()->getDataLayout();
1174 uint64_t Size = DL.getTypeStoreSize(LI.getType());
// Zero-sized loads carry no taint.
1176 DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
// Respect the load's own alignment when asked to; otherwise use the ABI
// alignment for the loaded type.
1181 if (ClPreserveAlignment) {
1182 Align = LI.getAlignment();
1184 Align = DL.getABITypeAlignment(LI.getType());
1188 IRBuilder<> IRB(&LI);
1189 Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
1190 if (ClCombinePointerLabelsOnLoad) {
1191 Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
1192 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
// Loaded shadows that may be non-zero feed -dfsan-debug-nonzero-labels.
1194 if (Shadow != DFSF.DFS.ZeroShadow)
1195 DFSF.NonZeroChecks.push_back(Shadow);
1197 DFSF.setShadow(&LI, Shadow);
1200 void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
1201 Value *Shadow, Instruction *Pos) {
// Writes Shadow into the shadow memory for bytes [Addr, Addr+Size).
// Fast path: tracked allocas store to their parallel shadow alloca.
1202 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1203 llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i =
1204 AllocaShadowMap.find(AI);
1205 if (i != AllocaShadowMap.end()) {
1206 IRBuilder<> IRB(Pos);
1207 IRB.CreateStore(Shadow, i->second);
1212 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
1213 IRBuilder<> IRB(Pos);
1214 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
// Storing the zero label: write one wide zero integer covering the whole
// shadow range instead of per-byte stores.
1215 if (Shadow == DFS.ZeroShadow) {
1216 IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth);
1217 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
1218 Value *ExtShadowAddr =
1219 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
1220 IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
// General case: splat the label into a 128-bit vector and store it in
// vector-sized chunks, then finish the remainder with scalar stores.
1224 const unsigned ShadowVecSize = 128 / DFS.ShadowWidth;
1225 uint64_t Offset = 0;
1226 if (Size >= ShadowVecSize) {
1227 VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize);
1228 Value *ShadowVec = UndefValue::get(ShadowVecTy);
1229 for (unsigned i = 0; i != ShadowVecSize; ++i) {
1230 ShadowVec = IRB.CreateInsertElement(
1231 ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
1233 Value *ShadowVecAddr =
1234 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
1236 Value *CurShadowVecAddr =
1237 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
1238 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
1239 Size -= ShadowVecSize;
1241 } while (Size >= ShadowVecSize);
// Convert the vector-chunk count back into a scalar element offset for
// the tail loop below.
1242 Offset *= ShadowVecSize;
// NOTE(review): the scalar tail loop header is elided in this view.
1245 Value *CurShadowAddr =
1246 IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
1247 IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
1253 void DFSanVisitor::visitStoreInst(StoreInst &SI) {
// Propagates the stored value's shadow into shadow memory, optionally
// combined with the pointer operand's shadow.
1254 auto &DL = SI.getModule()->getDataLayout();
1255 uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
// Alignment selection mirrors visitLoadInst.
1260 if (ClPreserveAlignment) {
1261 Align = SI.getAlignment();
1263 Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
1268 Value* Shadow = DFSF.getShadow(SI.getValueOperand());
1269 if (ClCombinePointerLabelsOnStore) {
1270 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
1271 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
1273 DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI);
1276 void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
// Binary operators use the default rule: union the operand shadows.
1277 visitOperandShadowInst(BO);
// Casts propagate their operand's shadow unchanged (default operand rule).
1280 void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }
// Comparisons taint their result with the union of both operand labels.
1282 void DFSanVisitor::visitCmpInst(CmpInst &CI) { visitOperandShadowInst(CI); }
1284 void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
// GEPs union the base pointer's and index operands' labels.
1285 visitOperandShadowInst(GEPI);
1288 void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
// Vector element extraction uses the default operand-union rule.
1289 visitOperandShadowInst(I);
1292 void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
// Vector element insertion uses the default operand-union rule.
1293 visitOperandShadowInst(I);
1296 void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
// Shuffles union the labels of both input vectors (and the mask operand).
1297 visitOperandShadowInst(I);
1300 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
// Aggregate extraction uses the default operand-union rule.
1301 visitOperandShadowInst(I);
1304 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
// Aggregate insertion uses the default operand-union rule.
1305 visitOperandShadowInst(I);
1308 void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
// If the alloca is only ever loaded from and stored to (never escapes),
// track its shadow in a cheap parallel alloca instead of shadow memory.
1309 bool AllLoadsStores = true;
1310 for (User *U : I.users()) {
1311 if (isa<LoadInst>(U))
// Stores are fine only when the alloca is the *pointer* operand; storing
// the alloca's address itself would let it escape.
1314 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1315 if (SI->getPointerOperand() == &I)
1319 AllLoadsStores = false;
1322 if (AllLoadsStores) {
1323 IRBuilder<> IRB(&I);
1324 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
// The alloca instruction's own result (the address) carries no taint.
1326 DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
1329 void DFSanVisitor::visitSelectInst(SelectInst &I) {
// Selects combine the condition's label with the label of the chosen value.
1330 Value *CondShadow = DFSF.getShadow(I.getCondition());
1331 Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
1332 Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
// Vector conditions select per-lane, so conservatively union everything.
1334 if (isa<VectorType>(I.getCondition()->getType())) {
1337 DFSF.combineShadows(
1338 CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I));
// Scalar condition: if both arms share a label, no select is needed;
// otherwise mirror the select on the shadows themselves.
1341 if (TrueShadow == FalseShadow) {
1342 ShadowSel = TrueShadow;
1345 SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
1347 DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I));
1351 void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
// memset propagates the fill byte's label to the whole destination range
// via the __dfsan_set_label runtime function.
1352 IRBuilder<> IRB(&I);
1353 Value *ValShadow = DFSF.getShadow(I.getValue());
1354 IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
1355 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
1357 IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
1360 void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
// memcpy/memmove: replicate the transfer on the shadow region by calling
// the same intrinsic with shadow addresses and a length/alignment scaled
// by the per-byte shadow size (ShadowWidth/8 bytes of shadow per app byte).
1361 IRBuilder<> IRB(&I);
1362 Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
1363 Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
1364 Value *LenShadow = IRB.CreateMul(
1366 ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8));
1368 if (ClPreserveAlignment) {
// Shadow alignment scales with the original alignment.
1369 AlignShadow = IRB.CreateMul(I.getAlignmentCst(),
1370 ConstantInt::get(I.getAlignmentCst()->getType(),
1371 DFSF.DFS.ShadowWidth / 8));
// Otherwise only the minimal per-label alignment is guaranteed.
1373 AlignShadow = ConstantInt::get(I.getAlignmentCst()->getType(),
1374 DFSF.DFS.ShadowWidth / 8);
1376 Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
1377 DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);
1378 SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
// Reuse the original intrinsic callee so memcpy stays memcpy and memmove
// stays memmove for the shadow transfer.
1379 IRB.CreateCall(I.getCalledValue(), {DestShadow, SrcShadow, LenShadow,
1380 AlignShadow, I.getVolatileCst()});
1383 void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
// Hands the return value's label back to the caller according to the
// instrumented ABI.  Native-ABI functions return no shadow.
1384 if (!DFSF.IsNativeABI && RI.getReturnValue()) {
// NOTE(review): the switch header on the instrumented ABI is elided in
// this view.
1386 case DataFlowSanitizer::IA_TLS: {
// TLS ABI: store the label into the retval TLS slot before returning.
1387 Value *S = DFSF.getShadow(RI.getReturnValue());
1388 IRBuilder<> IRB(&RI);
1389 IRB.CreateStore(S, DFSF.getRetvalTLS());
1392 case DataFlowSanitizer::IA_Args: {
// Args ABI: the function returns {value, shadow}; build the aggregate
// and make it the return operand.
1393 IRBuilder<> IRB(&RI);
1394 Type *RT = DFSF.F->getFunctionType()->getReturnType();
1396 IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
1398 IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
1399 RI.setOperand(0, InsShadow);
1406 void DFSanVisitor::visitCallSite(CallSite CS) {
// Instruments a call or invoke: dispatches declared-but-unwrapped callees
// by wrapper kind, then propagates argument/return shadows per the
// instrumented ABI.  Many closing braces/breaks are elided in this view.
1407 Function *F = CS.getCalledFunction();
// Intrinsics and inline asm get the default operand-union rule.
1408 if ((F && F->isIntrinsic()) || isa<InlineAsm>(CS.getCalledValue())) {
1409 visitOperandShadowInst(*CS.getInstruction());
1413 // Calls to this function are synthesized in wrappers, and we shouldn't
1415 if (F == DFSF.DFS.DFSanVarargWrapperFn)
// Vararg invokes are not supported (they would need the same dfsw$
// wrapper trick used below for custom functions).
1418 assert(!(cast<FunctionType>(
1419 CS.getCalledValue()->getType()->getPointerElementType())->isVarArg() &&
1420 dyn_cast<InvokeInst>(CS.getInstruction())));
1422 IRBuilder<> IRB(CS.getInstruction());
// Callees that were renamed/wrapped by the pass: behave per wrapper kind.
1424 DenseMap<Value *, Function *>::iterator i =
1425 DFSF.DFS.UnwrappedFnMap.find(CS.getCalledValue());
1426 if (i != DFSF.DFS.UnwrappedFnMap.end()) {
1427 Function *F = i->second;
1428 switch (DFSF.DFS.getWrapperKind(F)) {
1429 case DataFlowSanitizer::WK_Warning: {
// Warn at runtime that labels are not propagated through this callee.
1430 CS.setCalledFunction(F);
1431 IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
1432 IRB.CreateGlobalStringPtr(F->getName()));
1433 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
1436 case DataFlowSanitizer::WK_Discard: {
// Silently drop all labels across this callee.
1437 CS.setCalledFunction(F);
1438 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
1441 case DataFlowSanitizer::WK_Functional: {
// Pure function: result label is the union of argument labels.
1442 CS.setCalledFunction(F);
1443 visitOperandShadowInst(*CS.getInstruction());
1446 case DataFlowSanitizer::WK_Custom: {
1447 // Don't try to handle invokes of custom functions, it's too complicated.
1448 // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
1450 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
// Build (or reuse) the __dfsw_<name> custom wrapper declaration.
1451 FunctionType *FT = F->getFunctionType();
1452 FunctionType *CustomFT = DFSF.DFS.getCustomFunctionType(FT);
1453 std::string CustomFName = "__dfsw_";
1454 CustomFName += F->getName();
1456 DFSF.DFS.Mod->getOrInsertFunction(CustomFName, CustomFT);
1457 if (Function *CustomFn = dyn_cast<Function>(CustomF)) {
1458 CustomFn->copyAttributesFrom(F);
1460 // Custom functions returning non-void will write to the return label.
1461 if (!FT->getReturnType()->isVoidTy()) {
1462 CustomFn->removeAttributes(AttributeSet::FunctionIndex,
1463 DFSF.DFS.ReadOnlyNoneAttrs);
// Assemble the custom call's arguments: originals first (function
// pointers become trampolines), then their labels, then vararg labels,
// then the return-label out-parameter, then trailing varargs.
1467 std::vector<Value *> Args;
1469 CallSite::arg_iterator i = CS.arg_begin();
1470 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
1471 Type *T = (*i)->getType();
1472 FunctionType *ParamFT;
1473 if (isa<PointerType>(T) &&
1474 (ParamFT = dyn_cast<FunctionType>(
1475 cast<PointerType>(T)->getElementType()))) {
// Function-pointer argument: pass a dfst<N>$-trampoline plus the
// original pointer cast to i8*.
1476 std::string TName = "dfst";
1477 TName += utostr(FT->getNumParams() - n);
1479 TName += F->getName();
1480 Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
1483 IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
// Second pass: append one shadow per fixed parameter.
1490 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1491 Args.push_back(DFSF.getShadow(*i));
1493 if (FT->isVarArg()) {
// Vararg labels are passed via a stack array of shadows.
1494 auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
1495 CS.arg_size() - FT->getNumParams());
1496 auto *LabelVAAlloca = new AllocaInst(
1497 LabelVATy, "labelva", &DFSF.F->getEntryBlock().front());
1499 for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) {
1500 auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
1501 IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
1504 Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
1507 if (!FT->getReturnType()->isVoidTy()) {
// Out-parameter through which the custom function writes the label of
// its return value; one alloca is shared for the whole function.
1508 if (!DFSF.LabelReturnAlloca) {
1509 DFSF.LabelReturnAlloca =
1510 new AllocaInst(DFSF.DFS.ShadowTy, "labelreturn",
1511 &DFSF.F->getEntryBlock().front());
1513 Args.push_back(DFSF.LabelReturnAlloca);
// Finally forward the actual vararg values themselves.
1516 for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i)
1519 CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
1520 CustomCI->setCallingConv(CI->getCallingConv());
1521 CustomCI->setAttributes(CI->getAttributes());
1523 if (!FT->getReturnType()->isVoidTy()) {
// Read back the return label written through the out-parameter.
1524 LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca);
1525 DFSF.setShadow(CustomCI, LabelLoad);
// The custom call fully replaces the original call.
1528 CI->replaceAllUsesWith(CustomCI);
1529 CI->eraseFromParent();
// --- Generic (non-wrapped) call handling below. ---
1537 FunctionType *FT = cast<FunctionType>(
1538 CS.getCalledValue()->getType()->getPointerElementType());
1539 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
// TLS ABI: spill each argument's label into its arg-TLS slot before the
// call.
1540 for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
1541 IRB.CreateStore(DFSF.getShadow(CS.getArgument(i)),
1542 DFSF.getArgTLS(i, CS.getInstruction()));
// Find the instruction after which the return shadow becomes available:
// after the call, or at the top of an invoke's normal destination
// (splitting the edge if the destination has multiple predecessors).
1546 Instruction *Next = nullptr;
1547 if (!CS.getType()->isVoidTy()) {
1548 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
1549 if (II->getNormalDest()->getSinglePredecessor()) {
1550 Next = &II->getNormalDest()->front();
1553 SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
1554 Next = &NewBB->front();
1557 assert(CS->getIterator() != CS->getParent()->end());
1558 Next = CS->getNextNode();
1561 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
// TLS ABI: reload the return label from retval TLS right after the call.
// The load itself must not be re-instrumented (SkipInsts).
1562 IRBuilder<> NextIRB(Next);
1563 LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS());
1564 DFSF.SkipInsts.insert(LI);
1565 DFSF.setShadow(CS.getInstruction(), LI);
1566 DFSF.NonZeroChecks.push_back(LI);
1570 // Do all instrumentation for IA_Args down here to defer tampering with the
1571 // CFG in a way that SplitEdge may be able to detect.
1572 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
// Args ABI: rebuild the call against the shadow-augmented signature.
1573 FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
1575 IRB.CreateBitCast(CS.getCalledValue(), PointerType::getUnqual(NewFT));
1576 std::vector<Value *> Args;
1578 CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
// Original fixed arguments, then their labels.
1579 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1583 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1584 Args.push_back(DFSF.getShadow(*i));
1586 if (FT->isVarArg()) {
// Vararg labels are passed through a stack array, as in WK_Custom.
1587 unsigned VarArgSize = CS.arg_size() - FT->getNumParams();
1588 ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
1589 AllocaInst *VarArgShadow =
1590 new AllocaInst(VarArgArrayTy, "", &DFSF.F->getEntryBlock().front());
1591 Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
1592 for (unsigned n = 0; i != e; ++i, ++n) {
1595 IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
// Recreate the call/invoke against the new callee and argument list.
1601 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
1602 NewCS = IRB.CreateInvoke(Func, II->getNormalDest(), II->getUnwindDest(),
1605 NewCS = IRB.CreateCall(Func, Args);
1607 NewCS.setCallingConv(CS.getCallingConv());
// Return-attribute set must be re-filtered: the new return type is an
// aggregate, for which some attributes are no longer valid.
1608 NewCS.setAttributes(CS.getAttributes().removeAttributes(
1609 *DFSF.DFS.Ctx, AttributeSet::ReturnIndex,
1610 AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType())));
// Unpack the {value, shadow} aggregate; neither extract should be
// re-instrumented.
1613 ExtractValueInst *ExVal =
1614 ExtractValueInst::Create(NewCS.getInstruction(), 0, "", Next);
1615 DFSF.SkipInsts.insert(ExVal);
1616 ExtractValueInst *ExShadow =
1617 ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next);
1618 DFSF.SkipInsts.insert(ExShadow);
1619 DFSF.setShadow(ExVal, ExShadow);
1620 DFSF.NonZeroChecks.push_back(ExShadow);
1622 CS.getInstruction()->replaceAllUsesWith(ExVal);
1625 CS.getInstruction()->eraseFromParent();
1629 void DFSanVisitor::visitPHINode(PHINode &PN) {
1631 PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN);
1633 // Give the shadow phi node valid predecessors to fool SplitEdge into working.
1634 Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
1635 for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
1637 ShadowPN->addIncoming(UndefShadow, *i);
1640 DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
1641 DFSF.setShadow(&PN, ShadowPN);