//===-- CDSPass.cpp - Instrumentation for the CDSChecker race detector ---===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 // This file is distributed under the University of Illinois Open Source
7 // License. See LICENSE.TXT for details.
9 //===----------------------------------------------------------------------===//
11 // This file is a modified version of ThreadSanitizer.cpp, a part of a race detector.
13 // The tool is under development, for the details about previous versions see
14 // http://code.google.com/p/data-race-test
16 // The instrumentation phase is quite simple:
17 // - Insert calls to run-time library before every memory access.
18 // - Optimizations may apply to avoid instrumenting some of the accesses.
19 // - Insert calls at function entry/exit.
20 // The rest is handled by the run-time library.
21 //===----------------------------------------------------------------------===//
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/SmallString.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/LoopInfo.h"
29 #include "llvm/IR/BasicBlock.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/LLVMContext.h"
35 #include "llvm/IR/LegacyPassManager.h"
36 #include "llvm/IR/Module.h"
37 #include "llvm/IR/PassManager.h"
38 #include "llvm/Pass.h"
39 #include "llvm/ProfileData/InstrProf.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Support/AtomicOrdering.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Transforms/Scalar.h"
44 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
45 #include "llvm/Transforms/Utils/EscapeEnumerator.h"
46 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
52 #define DEBUG_TYPE "CDS"
53 #include <llvm/IR/DebugLoc.h>
55 static inline Value *getPosition( Instruction * I, IRBuilder <> IRB, bool print = false)
57 const DebugLoc & debug_location = I->getDebugLoc ();
58 std::string position_string;
60 llvm::raw_string_ostream position_stream (position_string);
61 debug_location . print (position_stream);
65 errs() << position_string << "\n";
68 return IRB.CreateGlobalStringPtr (position_string);
71 static inline bool checkSignature(Function * func, Value * args[]) {
72 FunctionType * FType = func->getFunctionType();
73 for (unsigned i = 0 ; i < FType->getNumParams(); i++) {
74 if (FType->getParamType(i) != args[i]->getType()) {
76 errs() << "expects: " << *FType->getParamType(i)
77 << "\tbut receives: " << *args[i]->getType() << "\n";
86 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
87 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
88 STATISTIC(NumOmittedReadsBeforeWrite,
89 "Number of reads ignored due to following writes");
90 STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
91 // STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
92 // STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
93 STATISTIC(NumOmittedReadsFromConstantGlobals,
94 "Number of reads from constant globals");
95 STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
96 STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
98 // static const char *const kCDSModuleCtorName = "cds.module_ctor";
99 // static const char *const kCDSInitName = "cds_init";
110 static const size_t kNumberOfAccessSizes = 4;
112 int getAtomicOrderIndex(AtomicOrdering order) {
114 case AtomicOrdering::Monotonic:
115 return (int)AtomicOrderingCABI::relaxed;
116 //case AtomicOrdering::Consume: // not specified yet
117 // return AtomicOrderingCABI::consume;
118 case AtomicOrdering::Acquire:
119 return (int)AtomicOrderingCABI::acquire;
120 case AtomicOrdering::Release:
121 return (int)AtomicOrderingCABI::release;
122 case AtomicOrdering::AcquireRelease:
123 return (int)AtomicOrderingCABI::acq_rel;
124 case AtomicOrdering::SequentiallyConsistent:
125 return (int)AtomicOrderingCABI::seq_cst;
127 // unordered or Not Atomic
132 AtomicOrderingCABI indexToAtomicOrder(int index) {
135 return AtomicOrderingCABI::relaxed;
137 return AtomicOrderingCABI::consume;
139 return AtomicOrderingCABI::acquire;
141 return AtomicOrderingCABI::release;
143 return AtomicOrderingCABI::acq_rel;
145 return AtomicOrderingCABI::seq_cst;
147 errs() << "Bad Atomic index\n";
148 return AtomicOrderingCABI::seq_cst;
152 /* According to atomic_base.h: __cmpexch_failure_order */
153 int AtomicCasFailureOrderIndex(int index) {
154 AtomicOrderingCABI succ_order = indexToAtomicOrder(index);
155 AtomicOrderingCABI fail_order;
156 if (succ_order == AtomicOrderingCABI::acq_rel)
157 fail_order = AtomicOrderingCABI::acquire;
158 else if (succ_order == AtomicOrderingCABI::release)
159 fail_order = AtomicOrderingCABI::relaxed;
161 fail_order = succ_order;
163 return (int) fail_order;
166 /* The original function checkSanitizerInterfaceFunction was defined
167 * in llvm/Transforms/Utils/ModuleUtils.h
169 static Function * checkCDSPassInterfaceFunction(Constant *FuncOrBitcast) {
170 if (isa<Function>(FuncOrBitcast))
171 return cast<Function>(FuncOrBitcast);
172 FuncOrBitcast->print(errs());
175 raw_string_ostream Stream(Err);
176 Stream << "CDSPass interface function redefined: " << *FuncOrBitcast;
177 report_fatal_error(Err);
181 struct CDSPass : public FunctionPass {
182 CDSPass() : FunctionPass(ID) {}
183 StringRef getPassName() const override;
184 bool runOnFunction(Function &F) override;
185 bool doInitialization(Module &M) override;
189 void initializeCallbacks(Module &M);
190 bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
191 bool instrumentVolatile(Instruction *I, const DataLayout &DL);
192 bool instrumentMemIntrinsic(Instruction *I);
193 bool isAtomicCall(Instruction *I);
194 bool instrumentAtomic(Instruction *I, const DataLayout &DL);
195 bool instrumentAtomicCall(CallInst *CI, const DataLayout &DL);
196 bool shouldInstrumentBeforeAtomics(Instruction *I);
197 void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
198 SmallVectorImpl<Instruction *> &All,
199 const DataLayout &DL);
200 bool addrPointsToConstantData(Value *Addr);
201 int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
202 bool instrumentLoops(Function &F);
204 Function * CDSFuncEntry;
205 Function * CDSFuncExit;
207 Function * CDSLoad[kNumberOfAccessSizes];
208 Function * CDSStore[kNumberOfAccessSizes];
209 Function * CDSVolatileLoad[kNumberOfAccessSizes];
210 Function * CDSVolatileStore[kNumberOfAccessSizes];
211 Function * CDSAtomicInit[kNumberOfAccessSizes];
212 Function * CDSAtomicLoad[kNumberOfAccessSizes];
213 Function * CDSAtomicStore[kNumberOfAccessSizes];
214 Function * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
215 Function * CDSAtomicCAS_V1[kNumberOfAccessSizes];
216 Function * CDSAtomicCAS_V2[kNumberOfAccessSizes];
217 Function * CDSAtomicThreadFence;
218 Function * MemmoveFn, * MemcpyFn, * MemsetFn;
219 // Function * CDSCtorFunction;
221 std::vector<StringRef> AtomicFuncNames;
222 std::vector<StringRef> PartialAtomicFuncNames;
226 StringRef CDSPass::getPassName() const {
230 void CDSPass::initializeCallbacks(Module &M) {
231 LLVMContext &Ctx = M.getContext();
233 Attr = Attr.addAttribute(Ctx, AttributeList::FunctionIndex,
234 Attribute::NoUnwind);
236 Type * Int1Ty = Type::getInt1Ty(Ctx);
237 Type * Int32Ty = Type::getInt32Ty(Ctx);
238 OrdTy = Type::getInt32Ty(Ctx);
240 Int8PtrTy = Type::getInt8PtrTy(Ctx);
241 Int16PtrTy = Type::getInt16PtrTy(Ctx);
242 Int32PtrTy = Type::getInt32PtrTy(Ctx);
243 Int64PtrTy = Type::getInt64PtrTy(Ctx);
245 VoidTy = Type::getVoidTy(Ctx);
247 CDSFuncEntry = checkCDSPassInterfaceFunction(
248 M.getOrInsertFunction("cds_func_entry",
249 Attr, VoidTy, Int8PtrTy));
250 CDSFuncExit = checkCDSPassInterfaceFunction(
251 M.getOrInsertFunction("cds_func_exit",
252 Attr, VoidTy, Int8PtrTy));
254 // Get the function to call from our untime library.
255 for (unsigned i = 0; i < kNumberOfAccessSizes; i++) {
256 const unsigned ByteSize = 1U << i;
257 const unsigned BitSize = ByteSize * 8;
259 std::string ByteSizeStr = utostr(ByteSize);
260 std::string BitSizeStr = utostr(BitSize);
262 Type *Ty = Type::getIntNTy(Ctx, BitSize);
263 Type *PtrTy = Ty->getPointerTo();
265 // uint8_t cds_atomic_load8 (void * obj, int atomic_index)
266 // void cds_atomic_store8 (void * obj, int atomic_index, uint8_t val)
267 SmallString<32> LoadName("cds_load" + BitSizeStr);
268 SmallString<32> StoreName("cds_store" + BitSizeStr);
269 SmallString<32> VolatileLoadName("cds_volatile_load" + BitSizeStr);
270 SmallString<32> VolatileStoreName("cds_volatile_store" + BitSizeStr);
271 SmallString<32> AtomicInitName("cds_atomic_init" + BitSizeStr);
272 SmallString<32> AtomicLoadName("cds_atomic_load" + BitSizeStr);
273 SmallString<32> AtomicStoreName("cds_atomic_store" + BitSizeStr);
275 CDSLoad[i] = checkCDSPassInterfaceFunction(
276 M.getOrInsertFunction(LoadName, Attr, VoidTy, PtrTy));
277 CDSStore[i] = checkCDSPassInterfaceFunction(
278 M.getOrInsertFunction(StoreName, Attr, VoidTy, PtrTy));
279 CDSVolatileLoad[i] = checkCDSPassInterfaceFunction(
280 M.getOrInsertFunction(VolatileLoadName,
281 Attr, Ty, PtrTy, Int8PtrTy));
282 CDSVolatileStore[i] = checkCDSPassInterfaceFunction(
283 M.getOrInsertFunction(VolatileStoreName,
284 Attr, VoidTy, PtrTy, Ty, Int8PtrTy));
285 CDSAtomicInit[i] = checkCDSPassInterfaceFunction(
286 M.getOrInsertFunction(AtomicInitName,
287 Attr, VoidTy, PtrTy, Ty, Int8PtrTy));
288 CDSAtomicLoad[i] = checkCDSPassInterfaceFunction(
289 M.getOrInsertFunction(AtomicLoadName,
290 Attr, Ty, PtrTy, OrdTy, Int8PtrTy));
291 CDSAtomicStore[i] = checkCDSPassInterfaceFunction(
292 M.getOrInsertFunction(AtomicStoreName,
293 Attr, VoidTy, PtrTy, Ty, OrdTy, Int8PtrTy));
295 for (int op = AtomicRMWInst::FIRST_BINOP;
296 op <= AtomicRMWInst::LAST_BINOP; ++op) {
297 CDSAtomicRMW[op][i] = nullptr;
298 std::string NamePart;
300 if (op == AtomicRMWInst::Xchg)
301 NamePart = "_exchange";
302 else if (op == AtomicRMWInst::Add)
303 NamePart = "_fetch_add";
304 else if (op == AtomicRMWInst::Sub)
305 NamePart = "_fetch_sub";
306 else if (op == AtomicRMWInst::And)
307 NamePart = "_fetch_and";
308 else if (op == AtomicRMWInst::Or)
309 NamePart = "_fetch_or";
310 else if (op == AtomicRMWInst::Xor)
311 NamePart = "_fetch_xor";
315 SmallString<32> AtomicRMWName("cds_atomic" + NamePart + BitSizeStr);
316 CDSAtomicRMW[op][i] = checkCDSPassInterfaceFunction(
317 M.getOrInsertFunction(AtomicRMWName,
318 Attr, Ty, PtrTy, Ty, OrdTy, Int8PtrTy));
321 // only supportes strong version
322 SmallString<32> AtomicCASName_V1("cds_atomic_compare_exchange" + BitSizeStr + "_v1");
323 SmallString<32> AtomicCASName_V2("cds_atomic_compare_exchange" + BitSizeStr + "_v2");
324 CDSAtomicCAS_V1[i] = checkCDSPassInterfaceFunction(
325 M.getOrInsertFunction(AtomicCASName_V1,
326 Attr, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, Int8PtrTy));
327 CDSAtomicCAS_V2[i] = checkCDSPassInterfaceFunction(
328 M.getOrInsertFunction(AtomicCASName_V2,
329 Attr, Int1Ty, PtrTy, PtrTy, Ty, OrdTy, OrdTy, Int8PtrTy));
332 CDSAtomicThreadFence = checkCDSPassInterfaceFunction(
333 M.getOrInsertFunction("cds_atomic_thread_fence", Attr, VoidTy, OrdTy, Int8PtrTy));
335 MemmoveFn = checkCDSPassInterfaceFunction(
336 M.getOrInsertFunction("memmove", Attr, Int8PtrTy, Int8PtrTy,
337 Int8PtrTy, IntPtrTy));
338 MemcpyFn = checkCDSPassInterfaceFunction(
339 M.getOrInsertFunction("memcpy", Attr, Int8PtrTy, Int8PtrTy,
340 Int8PtrTy, IntPtrTy));
341 MemsetFn = checkCDSPassInterfaceFunction(
342 M.getOrInsertFunction("memset", Attr, Int8PtrTy, Int8PtrTy,
346 bool CDSPass::doInitialization(Module &M) {
347 const DataLayout &DL = M.getDataLayout();
348 IntPtrTy = DL.getIntPtrType(M.getContext());
350 // createSanitizerCtorAndInitFunctions is defined in "llvm/Transforms/Utils/ModuleUtils.h"
351 // We do not support it yet
353 std::tie(CDSCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
354 M, kCDSModuleCtorName, kCDSInitName, {}, {});
356 appendToGlobalCtors(M, CDSCtorFunction, 0);
361 "atomic_init", "atomic_load", "atomic_store",
362 "atomic_fetch_", "atomic_exchange", "atomic_compare_exchange_"
365 PartialAtomicFuncNames =
367 "load", "store", "fetch", "exchange", "compare_exchange_"
373 static bool isVtableAccess(Instruction *I) {
374 if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
375 return Tag->isTBAAVtableAccess();
379 // Do not instrument known races/"benign races" that come from compiler
380 // instrumentatin. The user has no way of suppressing them.
381 static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
382 // Peel off GEPs and BitCasts.
383 Addr = Addr->stripInBoundsOffsets();
385 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
386 if (GV->hasSection()) {
387 StringRef SectionName = GV->getSection();
388 // Check if the global is in the PGO counters section.
389 auto OF = Triple(M->getTargetTriple()).getObjectFormat();
390 if (SectionName.endswith(
391 getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
395 // Check if the global is private gcov data.
396 if (GV->getName().startswith("__llvm_gcov") ||
397 GV->getName().startswith("__llvm_gcda"))
401 // Do not instrument acesses from different address spaces; we cannot deal
404 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
405 if (PtrTy->getPointerAddressSpace() != 0)
412 bool CDSPass::addrPointsToConstantData(Value *Addr) {
413 // If this is a GEP, just analyze its pointer operand.
414 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
415 Addr = GEP->getPointerOperand();
417 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
418 if (GV->isConstant()) {
419 // Reads from constant globals can not race with any writes.
420 NumOmittedReadsFromConstantGlobals++;
423 } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
424 if (isVtableAccess(L)) {
425 // Reads from a vtable pointer can not race with any writes.
426 NumOmittedReadsFromVtable++;
433 bool CDSPass::shouldInstrumentBeforeAtomics(Instruction * Inst) {
434 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
435 AtomicOrdering ordering = LI->getOrdering();
436 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
438 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
439 AtomicOrdering ordering = SI->getOrdering();
440 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
442 } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Inst)) {
443 AtomicOrdering ordering = RMWI->getOrdering();
444 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
446 } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
447 AtomicOrdering ordering = CASI->getSuccessOrdering();
448 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
450 } else if (FenceInst *FI = dyn_cast<FenceInst>(Inst)) {
451 AtomicOrdering ordering = FI->getOrdering();
452 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
459 void CDSPass::chooseInstructionsToInstrument(
460 SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
461 const DataLayout &DL) {
462 SmallPtrSet<Value*, 8> WriteTargets;
463 // Iterate from the end.
464 for (Instruction *I : reverse(Local)) {
465 if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
466 Value *Addr = Store->getPointerOperand();
467 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
469 WriteTargets.insert(Addr);
471 LoadInst *Load = cast<LoadInst>(I);
472 Value *Addr = Load->getPointerOperand();
473 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
475 if (WriteTargets.count(Addr)) {
476 // We will write to this temp, so no reason to analyze the read.
477 NumOmittedReadsBeforeWrite++;
480 if (addrPointsToConstantData(Addr)) {
481 // Addr points to some constant data -- it can not race with any writes.
485 Value *Addr = isa<StoreInst>(*I)
486 ? cast<StoreInst>(I)->getPointerOperand()
487 : cast<LoadInst>(I)->getPointerOperand();
488 if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
489 !PointerMayBeCaptured(Addr, true, true)) {
490 // The variable is addressable but not captured, so it cannot be
491 // referenced from a different thread and participate in a data race
492 // (see llvm/Analysis/CaptureTracking.h for details).
493 NumOmittedNonCaptured++;
502 void CDSPass::InsertRuntimeIgnores(Function &F) {
503 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
504 IRB.CreateCall(CDSIgnoreBegin);
505 EscapeEnumerator EE(F, "cds_ignore_cleanup", ClHandleCxxExceptions);
506 while (IRBuilder<> *AtExit = EE.Next()) {
507 AtExit->CreateCall(CDSIgnoreEnd);
511 bool CDSPass::runOnFunction(Function &F) {
512 initializeCallbacks( *F.getParent() );
513 SmallVector<Instruction*, 8> AllLoadsAndStores;
514 SmallVector<Instruction*, 8> LocalLoadsAndStores;
515 SmallVector<Instruction*, 8> VolatileLoadsAndStores;
516 SmallVector<Instruction*, 8> AtomicAccesses;
517 SmallVector<Instruction*, 8> MemIntrinCalls;
520 bool HasAtomic = false;
521 bool HasVolatile = false;
522 const DataLayout &DL = F.getParent()->getDataLayout();
524 // instrumentLoops(F);
527 for (auto &Inst : BB) {
528 if ( (&Inst)->isAtomic() ) {
529 AtomicAccesses.push_back(&Inst);
532 if (shouldInstrumentBeforeAtomics(&Inst)) {
533 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
536 } else if (isAtomicCall(&Inst) ) {
537 AtomicAccesses.push_back(&Inst);
539 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
541 } else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst)) {
542 LoadInst *LI = dyn_cast<LoadInst>(&Inst);
543 StoreInst *SI = dyn_cast<StoreInst>(&Inst);
544 bool isVolatile = ( LI ? LI->isVolatile() : SI->isVolatile() );
547 VolatileLoadsAndStores.push_back(&Inst);
550 LocalLoadsAndStores.push_back(&Inst);
551 } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
552 if (isa<MemIntrinsic>(Inst))
553 MemIntrinCalls.push_back(&Inst);
555 /*if (CallInst *CI = dyn_cast<CallInst>(&Inst))
556 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
559 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
564 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
567 for (auto Inst : AllLoadsAndStores) {
568 Res |= instrumentLoadOrStore(Inst, DL);
571 for (auto Inst : VolatileLoadsAndStores) {
572 Res |= instrumentVolatile(Inst, DL);
575 for (auto Inst : AtomicAccesses) {
576 Res |= instrumentAtomic(Inst, DL);
579 for (auto Inst : MemIntrinCalls) {
580 Res |= instrumentMemIntrinsic(Inst);
583 // Instrument function entry and exit for functions containing atomics or volatiles
584 if (Res && ( HasAtomic || HasVolatile) ) {
585 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
587 Value *ReturnAddress = IRB.CreateCall(
588 Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
592 Value * FuncName = IRB.CreateGlobalStringPtr(F.getName());
593 IRB.CreateCall(CDSFuncEntry, FuncName);
595 EscapeEnumerator EE(F, "cds_cleanup", true);
596 while (IRBuilder<> *AtExit = EE.Next()) {
597 AtExit->CreateCall(CDSFuncExit, FuncName);
606 bool CDSPass::instrumentLoadOrStore(Instruction *I,
607 const DataLayout &DL) {
609 bool IsWrite = isa<StoreInst>(*I);
610 Value *Addr = IsWrite
611 ? cast<StoreInst>(I)->getPointerOperand()
612 : cast<LoadInst>(I)->getPointerOperand();
614 // swifterror memory addresses are mem2reg promoted by instruction selection.
615 // As such they cannot have regular uses like an instrumentation function and
616 // it makes no sense to track them as memory.
617 if (Addr->isSwiftError())
620 int Idx = getMemoryAccessFuncIndex(Addr, DL);
624 if (IsWrite && isVtableAccess(I)) {
626 LLVM_DEBUG(dbgs() << " VPTR : " << *I << "\n");
627 Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
628 // StoredValue may be a vector type if we are storing several vptrs at once.
629 // In this case, just take the first element of the vector since this is
630 // enough to find vptr races.
631 if (isa<VectorType>(StoredValue->getType()))
632 StoredValue = IRB.CreateExtractElement(
633 StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
634 if (StoredValue->getType()->isIntegerTy())
635 StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
636 // Call TsanVptrUpdate.
637 IRB.CreateCall(TsanVptrUpdate,
638 {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
639 IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
640 NumInstrumentedVtableWrites++;
645 if (!IsWrite && isVtableAccess(I)) {
647 IRB.CreateCall(TsanVptrLoad,
648 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
649 NumInstrumentedVtableReads++;
654 // TODO: unaligned reads and writes
656 Value *OnAccessFunc = nullptr;
657 OnAccessFunc = IsWrite ? CDSStore[Idx] : CDSLoad[Idx];
659 Type *ArgType = IRB.CreatePointerCast(Addr, Addr->getType())->getType();
661 if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
662 ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
663 // if other types of load or stores are passed in
667 IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
668 if (IsWrite) NumInstrumentedWrites++;
669 else NumInstrumentedReads++;
673 bool CDSPass::instrumentVolatile(Instruction * I, const DataLayout &DL) {
675 const unsigned ByteSize = 1U << Idx;
676 const unsigned BitSize = ByteSize * 8;
677 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
678 Type *PtrTy = Ty->getPointerTo();
679 Value *position = getPosition(I, IRB);
681 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
682 Value *Addr = LI->getPointerOperand();
683 int Idx=getMemoryAccessFuncIndex(Addr, DL);
687 Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), position};
688 Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
689 Value *C = IRB.CreateCall(CDSVolatileLoad[Idx], Args);
690 Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
691 I->replaceAllUsesWith(Cast);
692 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
693 assert( SI->isVolatile() );
694 Value *Addr = SI->getPointerOperand();
695 int Idx=getMemoryAccessFuncIndex(Addr, DL);
699 Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
700 IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
702 CallInst *C = CallInst::Create(CDSVolatileStore[Idx], Args);
703 ReplaceInstWithInst(I, C);
711 bool CDSPass::instrumentMemIntrinsic(Instruction *I) {
713 if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
716 {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
717 IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
718 IRB.CreateIntCast(M->getArgOperand(2), IntPtrTy, false)});
719 I->eraseFromParent();
720 } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
722 isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
723 {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
724 IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
725 IRB.CreateIntCast(M->getArgOperand(2), IntPtrTy, false)});
726 I->eraseFromParent();
731 bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
734 if (auto *CI = dyn_cast<CallInst>(I)) {
735 return instrumentAtomicCall(CI, DL);
738 Value *position = getPosition(I, IRB);
739 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
740 Value *Addr = LI->getPointerOperand();
741 int Idx=getMemoryAccessFuncIndex(Addr, DL);
745 int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
746 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
747 Value *Args[] = {Addr, order, position};
748 Instruction* funcInst = CallInst::Create(CDSAtomicLoad[Idx], Args);
749 ReplaceInstWithInst(LI, funcInst);
750 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
751 Value *Addr = SI->getPointerOperand();
752 int Idx=getMemoryAccessFuncIndex(Addr, DL);
756 int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
757 Value *val = SI->getValueOperand();
758 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
759 Value *Args[] = {Addr, val, order, position};
760 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], Args);
761 ReplaceInstWithInst(SI, funcInst);
762 } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
763 Value *Addr = RMWI->getPointerOperand();
764 int Idx=getMemoryAccessFuncIndex(Addr, DL);
768 int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
769 Value *val = RMWI->getValOperand();
770 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
771 Value *Args[] = {Addr, val, order, position};
772 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][Idx], Args);
773 ReplaceInstWithInst(RMWI, funcInst);
774 } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
775 IRBuilder<> IRB(CASI);
777 Value *Addr = CASI->getPointerOperand();
778 int Idx=getMemoryAccessFuncIndex(Addr, DL);
782 const unsigned ByteSize = 1U << Idx;
783 const unsigned BitSize = ByteSize * 8;
784 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
785 Type *PtrTy = Ty->getPointerTo();
787 Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
788 Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
790 int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
791 int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
792 Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
793 Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);
795 Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
796 CmpOperand, NewOperand,
797 order_succ, order_fail, position};
799 CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS_V1[Idx], Args);
800 Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);
802 Value *OldVal = funcInst;
803 Type *OrigOldValTy = CASI->getNewValOperand()->getType();
804 if (Ty != OrigOldValTy) {
805 // The value is a pointer, so we need to cast the return value.
806 OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
810 IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
811 Res = IRB.CreateInsertValue(Res, Success, 1);
813 I->replaceAllUsesWith(Res);
814 I->eraseFromParent();
815 } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
816 int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
817 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
818 Value *Args[] = {order, position};
820 CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
821 ReplaceInstWithInst(FI, funcInst);
822 // errs() << "Thread Fences replaced\n";
827 bool CDSPass::isAtomicCall(Instruction *I) {
828 if ( auto *CI = dyn_cast<CallInst>(I) ) {
829 Function *fun = CI->getCalledFunction();
833 StringRef funName = fun->getName();
835 // TODO: come up with better rules for function name checking
836 for (StringRef name : AtomicFuncNames) {
837 if ( funName.contains(name) )
841 for (StringRef PartialName : PartialAtomicFuncNames) {
842 if (funName.contains(PartialName) &&
843 funName.contains("atomic") )
851 bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
853 Function *fun = CI->getCalledFunction();
854 StringRef funName = fun->getName();
855 std::vector<Value *> parameters;
857 User::op_iterator begin = CI->arg_begin();
858 User::op_iterator end = CI->arg_end();
859 for (User::op_iterator it = begin; it != end; ++it) {
861 parameters.push_back(param);
864 // obtain source line number of the CallInst
865 Value *position = getPosition(CI, IRB);
867 // the pointer to the address is always the first argument
868 Value *OrigPtr = parameters[0];
870 int Idx = getMemoryAccessFuncIndex(OrigPtr, DL);
874 const unsigned ByteSize = 1U << Idx;
875 const unsigned BitSize = ByteSize * 8;
876 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
877 Type *PtrTy = Ty->getPointerTo();
879 // atomic_init; args = {obj, order}
880 if (funName.contains("atomic_init")) {
881 Value *OrigVal = parameters[1];
883 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
885 if (OrigVal->getType()->isPtrOrPtrVectorTy())
886 val = IRB.CreatePointerCast(OrigVal, Ty);
888 val = IRB.CreateIntCast(OrigVal, Ty, true);
890 Value *args[] = {ptr, val, position};
892 if (!checkSignature(CDSAtomicInit[Idx], args))
895 Instruction* funcInst = CallInst::Create(CDSAtomicInit[Idx], args);
896 ReplaceInstWithInst(CI, funcInst);
900 // atomic_load; args = {obj, order}
901 if (funName.contains("atomic_load")) {
902 bool isExplicit = funName.contains("atomic_load_explicit");
904 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
907 order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
909 order = ConstantInt::get(OrdTy,
910 (int) AtomicOrderingCABI::seq_cst);
911 Value *args[] = {ptr, order, position};
913 if (!checkSignature(CDSAtomicLoad[Idx], args))
916 Instruction* funcInst = CallInst::Create(CDSAtomicLoad[Idx], args);
917 ReplaceInstWithInst(CI, funcInst);
920 } else if (funName.contains("atomic") &&
921 funName.contains("load") ) {
922 // does this version of call always have an atomic order as an argument?
923 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
924 Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
925 Value *args[] = {ptr, order, position};
927 // Without this check, gdax does not compile :(
928 if (!CI->getType()->isPointerTy()) {
932 if (!checkSignature(CDSAtomicLoad[Idx], args))
935 CallInst *funcInst = IRB.CreateCall(CDSAtomicLoad[Idx], args);
936 Value *RetVal = IRB.CreateIntToPtr(funcInst, CI->getType());
938 CI->replaceAllUsesWith(RetVal);
939 CI->eraseFromParent();
944 // atomic_store; args = {obj, val, order}
945 if (funName.contains("atomic_store")) {
946 bool isExplicit = funName.contains("atomic_store_explicit");
947 Value *OrigVal = parameters[1];
// --- Interior of the atomic-library-call instrumentation routine ---
// Dispatches on the callee name (funName) and replaces recognized C11/C++
// atomic library calls with calls into the CDS runtime (CDSAtomicStore,
// CDSAtomicRMW, CDSAtomicCAS_V2).  Arguments are normalized to {ptr, val,
// order, position} via pointer/int casts before the replacement call is built.
949 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
950 Value *val = IRB.CreatePointerCast(OrigVal, Ty);
// Explicit-order variant: reuse the caller-supplied memory order; otherwise
// default to seq_cst per the C11 non-_explicit atomic functions.
953 order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
955 order = ConstantInt::get(OrdTy,
956 (int) AtomicOrderingCABI::seq_cst);
957 Value *args[] = {ptr, val, order, position};
// Bail out (leave the call uninstrumented) if the runtime hook's signature
// does not match the argument list we built.
959 if (!checkSignature(CDSAtomicStore[Idx], args))
962 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
963 ReplaceInstWithInst(CI, funcInst);
// Generic "*atomic*store*" spelling (e.g. member functions / other mangled
// forms) — same lowering as above, but the value operand may be a pointer.
966 } else if (funName.contains("atomic") &&
967 funName.contains("store") ) {
968 // Does this version of call always have an atomic order as an argument?
969 if (parameters.size() < 3)
972 Value *OrigVal = parameters[1];
973 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
// Pointer payloads are pointer-cast; integer payloads are sign-extended/
// truncated to the runtime's integer width for this access size.
976 if (OrigVal->getType()->isPtrOrPtrVectorTy())
977 val = IRB.CreatePointerCast(OrigVal, Ty);
979 val = IRB.CreateIntCast(OrigVal, Ty, true);
981 Value *order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
982 Value *args[] = {ptr, val, order, position};
984 if (!checkSignature(CDSAtomicStore[Idx], args))
987 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
988 ReplaceInstWithInst(CI, funcInst);
993 // atomic_fetch_*; args = {obj, val, order}
994 if (funName.contains("atomic_fetch_") ||
995 funName.contains("atomic_exchange")) {
997 bool isExplicit = funName.contains("_explicit");
998 Value *OrigVal = parameters[1];
// Map the function-name suffix to the corresponding RMW opcode used to
// index the CDSAtomicRMW hook table.
1001 if ( funName.contains("_fetch_add") )
1002 op = AtomicRMWInst::Add;
1003 else if ( funName.contains("_fetch_sub") )
1004 op = AtomicRMWInst::Sub;
1005 else if ( funName.contains("_fetch_and") )
1006 op = AtomicRMWInst::And;
1007 else if ( funName.contains("_fetch_or") )
1008 op = AtomicRMWInst::Or;
1009 else if ( funName.contains("_fetch_xor") )
1010 op = AtomicRMWInst::Xor;
1011 else if ( funName.contains("atomic_exchange") )
1012 op = AtomicRMWInst::Xchg;
// Unrecognized fetch operation: report and (presumably) skip instrumenting.
1014 errs() << "Unknown atomic read-modify-write operation\n";
1018 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
1020 if (OrigVal->getType()->isPtrOrPtrVectorTy())
1021 val = IRB.CreatePointerCast(OrigVal, Ty);
1023 val = IRB.CreateIntCast(OrigVal, Ty, true);
// _explicit variants carry the order as parameter 2; otherwise seq_cst.
1027 order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
1029 order = ConstantInt::get(OrdTy,
1030 (int) AtomicOrderingCABI::seq_cst);
1031 Value *args[] = {ptr, val, order, position};
1033 if (!checkSignature(CDSAtomicRMW[op][Idx], args))
1036 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[op][Idx], args);
1037 ReplaceInstWithInst(CI, funcInst);
// Fallback: some other "fetch" spelling we do not yet lower — log position.
1040 } else if (funName.contains("fetch")) {
1041 errs() << "atomic fetch captured. Not implemented yet. ";
1042 errs() << "See source file :";
1043 getPosition(CI, IRB, true);
// C++ std::atomic<T>::exchange (excluding compare_exchange) → RMW Xchg.
1045 } else if (funName.contains("exchange") &&
1046 !funName.contains("compare_exchange") ) {
// Pointer-valued exchange is not handled yet (see TODO below) — only log it.
1047 if (CI->getType()->isPointerTy()) {
1049 * TODO: instrument the following case
1051 * std::atomic<struct T *> m_tail;
1053 * struct T * pred = m_tail.exchange(me, memory_order_*);
1055 errs() << "atomic exchange captured. Not implemented yet. ";
1056 errs() << "See source file :";
1057 getPosition(CI, IRB, true);
1062 Value *OrigVal = parameters[1];
1064 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
1066 if (OrigVal->getType()->isPtrOrPtrVectorTy())
1067 val = IRB.CreatePointerCast(OrigVal, Ty);
1069 val = IRB.CreateIntCast(OrigVal, Ty, true);
1071 Value *order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
1072 Value *args[] = {ptr, val, order, position};
1074 int op = AtomicRMWInst::Xchg;
1076 if (!checkSignature(CDSAtomicRMW[op][Idx], args))
1079 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[op][Idx], args);
1080 ReplaceInstWithInst(CI, funcInst);
1085 /* atomic_compare_exchange_*;
1086 args = {obj, expected, new value, order1, order2}
// C11 atomic_compare_exchange_{strong,weak}[_explicit] → CDSAtomicCAS_V2,
// which takes both success and failure orders plus the source position.
1088 if ( funName.contains("atomic_compare_exchange_") ) {
1089 bool isExplicit = funName.contains("_explicit");
1091 Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
1092 Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
1093 Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
1095 Value *order_succ, *order_fail;
1097 order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
1099 if (parameters.size() > 4) {
1100 order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
1102 /* The failure order is not provided */
1103 order_fail = order_succ;
// NOTE(review): dyn_cast may return null when the success order is not a
// compile-time constant; the next line would then deref null — confirm the
// surrounding (elided) guard, or this needs a null check before use.
1104 ConstantInt * order_succ_cast = dyn_cast<ConstantInt>(order_succ);
1105 int index = order_succ_cast->getSExtValue();
// Derive a legal failure order from the success order (e.g. acq_rel →
// acquire), mirroring the C++ rule that failure order cannot be stronger.
1107 order_fail = ConstantInt::get(OrdTy,
1108 AtomicCasFailureOrderIndex(index));
// Non-_explicit variant: both orders default to seq_cst.
1111 order_succ = ConstantInt::get(OrdTy,
1112 (int) AtomicOrderingCABI::seq_cst);
1113 order_fail = ConstantInt::get(OrdTy,
1114 (int) AtomicOrderingCABI::seq_cst);
1117 Value *args[] = {Addr, CmpOperand, NewOperand,
1118 order_succ, order_fail, position};
1120 if (!checkSignature(CDSAtomicCAS_V2[Idx], args))
1123 Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
1124 ReplaceInstWithInst(CI, funcInst);
// C++ std::atomic<T>::compare_exchange_{strong,weak} — same lowering as the
// C11 CAS path above; the order arguments always come from the call.
1127 } else if ( funName.contains("compare_exchange_strong") ||
1128 funName.contains("compare_exchange_weak") ) {
1129 Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
1130 Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
1131 Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
1133 Value *order_succ, *order_fail;
1134 order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
1136 if (parameters.size() > 4) {
1137 order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
1139 /* The failure order is not provided */
1140 order_fail = order_succ;
// NOTE(review): same unchecked dyn_cast deref as the C11 CAS branch above —
// null if order_succ is not a ConstantInt; verify or guard.
1141 ConstantInt * order_succ_cast = dyn_cast<ConstantInt>(order_succ);
1142 int index = order_succ_cast->getSExtValue();
1144 order_fail = ConstantInt::get(OrdTy,
1145 AtomicCasFailureOrderIndex(index));
1148 Value *args[] = {Addr, CmpOperand, NewOperand,
1149 order_succ, order_fail, position};
1151 if (!checkSignature(CDSAtomicCAS_V2[Idx], args))
1154 Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
1155 ReplaceInstWithInst(CI, funcInst);
// Map a memory access through Addr to the index of the runtime-hook table
// entry for its access size: 0 for 1-byte, 1 for 2-byte, 2 for 4-byte,
// 3 for 8-byte, 4 for 16-byte accesses (log2 of the byte size).
// Returns -1 (see elided branches) for unusual sizes, which the callers
// treat as "do not instrument"; bumps the NumAccessesWithBadSize statistic.
1163 int CDSPass::getMemoryAccessFuncIndex(Value *Addr,
1164 const DataLayout &DL) {
1165 Type *OrigPtrTy = Addr->getType();
// NOTE(review): PointerType::getElementType() was removed with opaque
// pointers in newer LLVM — this pins the pass to a typed-pointer LLVM;
// confirm the toolchain version before upgrading.
1166 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
1167 assert(OrigTy->isSized());
1168 uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
1169 if (TypeSize != 8 && TypeSize != 16 &&
1170 TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
1171 NumAccessesWithBadSize++;
1172 // Ignore all unusual sizes.
// Idx = log2(size in bytes); countTrailingZeros of a power of two.
1175 size_t Idx = countTrailingZeros(TypeSize / 8);
1176 //assert(Idx < kNumberOfAccessSizes);
// Defensive bound check instead of the assert above (kept disabled).
1177 if (Idx >= kNumberOfAccessSizes) {
// Scan every loop in F (post-order over the loop forest) for atomic
// instructions, recognized atomic library calls, or volatile loads/stores.
// Loops containing any of these are flagged for instrumentation.
// Returns true if at least one loop was flagged.  The actual per-loop
// instrumentation is still a TODO (see below) — currently it only reports.
1183 bool CDSPass::instrumentLoops(Function &F)
1185 DominatorTree DT(F);
1188 SmallVector<Loop *, 4> Loops = LI.getLoopsInPreorder();
1189 bool instrumented = false;
1191 // Do a post-order traversal of the loops so that counter updates can be
1192 // iteratively hoisted outside the loop nest.
// (Preorder vector walked in reverse ≈ post-order over the loop nest.)
1193 for (auto *Loop : llvm::reverse(Loops)) {
1194 bool instrument_loop = false;
1196 // Iterator over loop blocks and search for atomics and volatiles
1197 Loop::block_iterator it;
1198 for (it = Loop->block_begin(); it != Loop->block_end(); it++) {
1199 BasicBlock * block = *it;
1200 for (auto &Inst : *block) {
// Native atomic instruction (load/store/RMW/CAS/fence with ordering).
1201 if ( (&Inst)->isAtomic() ) {
1202 instrument_loop = true;
// Call to a recognized C11/C++ atomic library function.
1204 } else if (isAtomicCall(&Inst)) {
1205 instrument_loop = true;
// Volatile memory accesses also count as synchronization-relevant.
1207 } else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst)) {
1208 LoadInst *LI = dyn_cast<LoadInst>(&Inst);
1209 StoreInst *SI = dyn_cast<StoreInst>(&Inst);
1210 bool isVolatile = ( LI ? LI->isVolatile() : SI->isVolatile() );
1213 instrument_loop = true;
// Early exit: one hit is enough to flag the whole loop.
1219 if (instrument_loop)
1223 if (instrument_loop) {
1224 // TODO: what to instrument?
// NOTE(review): debug output left enabled — consider LLVM_DEBUG/DEBUG_TYPE
// so release builds stay quiet.
1225 errs() << "Function: " << F.getName() << "\n";
1226 BasicBlock * header = Loop->getHeader();
1229 instrumented = true;
1233 return instrumented;
// Unique address used by the legacy pass manager to identify this pass.
1236 char CDSPass::ID = 0;
1238 // Automatically enable the pass.
// Callback invoked by RegisterStandardPasses below: appends a fresh CDSPass
// instance to the pipeline (the pass manager takes ownership of the pointer).
1239 static void registerCDSPass(const PassManagerBuilder &,
1240 legacy::PassManagerBase &PM) {
1241 PM.add(new CDSPass());
1244 /* Enable the pass when opt level is greater than 0 */
// EP_OptimizerLast: run after all other optimizations so instrumentation
// is not disturbed by later transforms.
1245 static RegisterStandardPasses
1246 RegisterMyPass1(PassManagerBuilder::EP_OptimizerLast,
1249 /* Enable the pass when opt level is 0 */
// EP_EnabledOnOptLevel0: separate extension point needed for -O0 builds.
1250 static RegisterStandardPasses
1251 RegisterMyPass2(PassManagerBuilder::EP_EnabledOnOptLevel0,