1 //===-- CDSPass.cpp - xxx -------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 // This file is distributed under the University of Illinois Open Source
7 // License. See LICENSE.TXT for details.
9 //===----------------------------------------------------------------------===//
11 // This file is a modified version of ThreadSanitizer.cpp, a part of a race detector.
13 // The tool is under development, for the details about previous versions see
14 // http://code.google.com/p/data-race-test
16 // The instrumentation phase is quite simple:
17 // - Insert calls to run-time library before every memory access.
18 // - Optimizations may apply to avoid instrumenting some of the accesses.
19 // - Insert calls at function entry/exit.
20 // The rest is handled by the run-time library.
21 //===----------------------------------------------------------------------===//
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/SmallString.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/IR/BasicBlock.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/IRBuilder.h"
31 #include "llvm/IR/Instructions.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/IR/LLVMContext.h"
34 #include "llvm/IR/LegacyPassManager.h"
35 #include "llvm/IR/Module.h"
36 #include "llvm/IR/PassManager.h"
37 #include "llvm/Pass.h"
38 #include "llvm/ProfileData/InstrProf.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/Support/AtomicOrdering.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Transforms/Scalar.h"
43 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
44 #include "llvm/Transforms/Utils/EscapeEnumerator.h"
45 // #include "llvm/Transforms/Utils/ModuleUtils.h"
46 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
51 #define DEBUG_TYPE "CDS"
52 #include <llvm/IR/DebugLoc.h>
54 Value *getPosition( Instruction * I, IRBuilder <> IRB, bool print = false)
56 const DebugLoc & debug_location = I->getDebugLoc ();
57 std::string position_string;
59 llvm::raw_string_ostream position_stream (position_string);
60 debug_location . print (position_stream);
64 errs() << position_string << "\n";
67 return IRB.CreateGlobalStringPtr (position_string);
// Pass-wide counters reported via -stats: what was instrumented and why
// some accesses were deliberately skipped.
70 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
71 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
72 STATISTIC(NumOmittedReadsBeforeWrite,
73 "Number of reads ignored due to following writes");
74 STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
// Vtable instrumentation is not supported by CDS yet (see the commented-out
// block in instrumentLoadOrStore), so these counters stay disabled.
75 // STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
76 // STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
77 STATISTIC(NumOmittedReadsFromConstantGlobals,
78 "Number of reads from constant globals");
79 STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
80 STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
// Module-ctor support is disabled (see doInitialization).
82 // static const char *const kCDSModuleCtorName = "cds.module_ctor";
83 // static const char *const kCDSInitName = "cds_init";
// Runtime hooks exist for 1/2/4/8-byte accesses; index = log2(byte size).
94 static const size_t kNumberOfAccessSizes = 4;
96 int getAtomicOrderIndex(AtomicOrdering order) {
98 case AtomicOrdering::Monotonic:
99 return (int)AtomicOrderingCABI::relaxed;
100 // case AtomicOrdering::Consume: // not specified yet
101 // return AtomicOrderingCABI::consume;
102 case AtomicOrdering::Acquire:
103 return (int)AtomicOrderingCABI::acquire;
104 case AtomicOrdering::Release:
105 return (int)AtomicOrderingCABI::release;
106 case AtomicOrdering::AcquireRelease:
107 return (int)AtomicOrderingCABI::acq_rel;
108 case AtomicOrdering::SequentiallyConsistent:
109 return (int)AtomicOrderingCABI::seq_cst;
111 // unordered or Not Atomic
116 AtomicOrderingCABI indexToAtomicOrder(int index) {
119 return AtomicOrderingCABI::relaxed;
121 return AtomicOrderingCABI::consume;
123 return AtomicOrderingCABI::acquire;
125 return AtomicOrderingCABI::release;
127 return AtomicOrderingCABI::acq_rel;
129 return AtomicOrderingCABI::seq_cst;
131 errs() << "Bad Atomic index\n";
132 return AtomicOrderingCABI::seq_cst;
136 /* According to atomic_base.h: __cmpexch_failure_order */
137 int AtomicCasFailureOrderIndex(int index) {
138 AtomicOrderingCABI succ_order = indexToAtomicOrder(index);
139 AtomicOrderingCABI fail_order;
140 if (succ_order == AtomicOrderingCABI::acq_rel)
141 fail_order = AtomicOrderingCABI::acquire;
142 else if (succ_order == AtomicOrderingCABI::release)
143 fail_order = AtomicOrderingCABI::relaxed;
145 fail_order = succ_order;
147 return (int) fail_order;
150 /* The original function checkSanitizerInterfaceFunction was defined
151 * in llvm/Transforms/Utils/ModuleUtils.h
153 static Function * checkCDSPassInterfaceFunction(Constant *FuncOrBitcast) {
154 if (isa<Function>(FuncOrBitcast))
155 return cast<Function>(FuncOrBitcast);
156 FuncOrBitcast->print(errs());
159 raw_string_ostream Stream(Err);
160 Stream << "CDSPass interface function redefined: " << *FuncOrBitcast;
161 report_fatal_error(Err);
// Legacy FunctionPass that rewrites memory accesses and atomic operations
// into calls to the CDS model-checker runtime (cds_load*/cds_store*/
// cds_atomic_* etc.).
// NOTE(review): this listing has gaps (embedded line numbers skip), so
// member declarations used elsewhere (e.g. OrdTy, IntPtrTy, the *PtrTy
// cached types, static char ID) are not visible here — confirm against the
// full file.
165 struct CDSPass : public FunctionPass {
166 CDSPass() : FunctionPass(ID) {}
167 StringRef getPassName() const override;
168 bool runOnFunction(Function &F) override;
169 bool doInitialization(Module &M) override;
// Resolve/declare all runtime hook functions in module M.
173 void initializeCallbacks(Module &M);
// Instrumentation helpers; each returns true iff IR was changed.
174 bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
175 bool instrumentVolatile(Instruction *I, const DataLayout &DL);
176 bool instrumentMemIntrinsic(Instruction *I);
// True when I is a call into the C/C++ atomic library (by name matching).
177 bool isAtomicCall(Instruction *I);
178 bool instrumentAtomic(Instruction *I, const DataLayout &DL);
179 bool instrumentAtomicCall(CallInst *CI, const DataLayout &DL);
// True when the atomic forms a synchronization point (>= acquire) that
// forces buffered plain accesses to be flushed first.
180 bool shouldInstrumentBeforeAtomics(Instruction *I);
// Move the subset of Local worth instrumenting into All; clears Local.
181 void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
182 SmallVectorImpl<Instruction *> &All,
183 const DataLayout &DL);
184 bool addrPointsToConstantData(Value *Addr);
// log2(byte size) index into the hook arrays, or -1 for unusual sizes.
185 int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
// Function entry/exit hooks (used only when atomics/volatiles are present).
187 Function * CDSFuncEntry;
188 Function * CDSFuncExit;
// Per-size runtime hooks, indexed by log2(byte size) (see
// kNumberOfAccessSizes).
190 Function * CDSLoad[kNumberOfAccessSizes];
191 Function * CDSStore[kNumberOfAccessSizes];
192 Function * CDSVolatileLoad[kNumberOfAccessSizes];
193 Function * CDSVolatileStore[kNumberOfAccessSizes];
194 Function * CDSAtomicInit[kNumberOfAccessSizes];
195 Function * CDSAtomicLoad[kNumberOfAccessSizes];
196 Function * CDSAtomicStore[kNumberOfAccessSizes];
// RMW hooks indexed by [AtomicRMWInst::BinOp][size]; unsupported ops stay
// nullptr (see initializeCallbacks).
197 Function * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
// CAS v1: value-returning form; v2: bool-returning form taking "expected"
// by pointer (C11 atomic_compare_exchange_*).
198 Function * CDSAtomicCAS_V1[kNumberOfAccessSizes];
199 Function * CDSAtomicCAS_V2[kNumberOfAccessSizes];
200 Function * CDSAtomicThreadFence;
201 Function * MemmoveFn, * MemcpyFn, * MemsetFn;
202 // Function * CDSCtorFunction;
// Name tables consulted by isAtomicCall(); filled in doInitialization().
204 std::vector<StringRef> AtomicFuncNames;
205 std::vector<StringRef> PartialAtomicFuncNames;
209 StringRef CDSPass::getPassName() const {
// Declare (or look up) every CDS runtime hook in module M and cache the
// Function* handles plus the frequently used IR types. All hooks are
// declared nounwind.
// NOTE(review): this listing has gaps (embedded line numbers skip), so some
// statements of this function — e.g. the AttributeList declaration and
// several closing braces — are not visible here.
213 void CDSPass::initializeCallbacks(Module &M) {
214 LLVMContext &Ctx = M.getContext();
// Runtime hooks never unwind into instrumented code.
216 Attr = Attr.addAttribute(Ctx, AttributeList::FunctionIndex,
217 Attribute::NoUnwind);
219 Type * Int1Ty = Type::getInt1Ty(Ctx);
220 Type * Int32Ty = Type::getInt32Ty(Ctx);
// Memory-order arguments are passed as i32 (C ABI index).
221 OrdTy = Type::getInt32Ty(Ctx);
223 Int8PtrTy = Type::getInt8PtrTy(Ctx);
224 Int16PtrTy = Type::getInt16PtrTy(Ctx);
225 Int32PtrTy = Type::getInt32PtrTy(Ctx);
226 Int64PtrTy = Type::getInt64PtrTy(Ctx);
228 VoidTy = Type::getVoidTy(Ctx);
// Function entry/exit hooks take the function-name string (i8*).
230 CDSFuncEntry = checkCDSPassInterfaceFunction(
231 M.getOrInsertFunction("cds_func_entry",
232 Attr, VoidTy, Int8PtrTy));
233 CDSFuncExit = checkCDSPassInterfaceFunction(
234 M.getOrInsertFunction("cds_func_exit",
235 Attr, VoidTy, Int8PtrTy));
237 // Get the function to call from our runtime library.
// One hook per access size: i == log2(byte size), so 1/2/4/8 bytes.
238 for (unsigned i = 0; i < kNumberOfAccessSizes; i++) {
239 const unsigned ByteSize = 1U << i;
240 const unsigned BitSize = ByteSize * 8;
242 std::string ByteSizeStr = utostr(ByteSize);
243 std::string BitSizeStr = utostr(BitSize);
245 Type *Ty = Type::getIntNTy(Ctx, BitSize);
246 Type *PtrTy = Ty->getPointerTo();
248 // uint8_t cds_atomic_load8 (void * obj, int atomic_index)
249 // void cds_atomic_store8 (void * obj, int atomic_index, uint8_t val)
250 SmallString<32> LoadName("cds_load" + BitSizeStr);
251 SmallString<32> StoreName("cds_store" + BitSizeStr);
252 SmallString<32> VolatileLoadName("cds_volatile_load" + BitSizeStr);
253 SmallString<32> VolatileStoreName("cds_volatile_store" + BitSizeStr);
254 SmallString<32> AtomicInitName("cds_atomic_init" + BitSizeStr);
255 SmallString<32> AtomicLoadName("cds_atomic_load" + BitSizeStr);
256 SmallString<32> AtomicStoreName("cds_atomic_store" + BitSizeStr);
258 CDSLoad[i] = checkCDSPassInterfaceFunction(
259 M.getOrInsertFunction(LoadName, Attr, VoidTy, PtrTy));
260 CDSStore[i] = checkCDSPassInterfaceFunction(
261 M.getOrInsertFunction(StoreName, Attr, VoidTy, PtrTy));
// Volatile/atomic hooks additionally take the source-position string (i8*).
262 CDSVolatileLoad[i] = checkCDSPassInterfaceFunction(
263 M.getOrInsertFunction(VolatileLoadName,
264 Attr, Ty, PtrTy, Int8PtrTy));
265 CDSVolatileStore[i] = checkCDSPassInterfaceFunction(
266 M.getOrInsertFunction(VolatileStoreName,
267 Attr, VoidTy, PtrTy, Ty, Int8PtrTy));
268 CDSAtomicInit[i] = checkCDSPassInterfaceFunction(
269 M.getOrInsertFunction(AtomicInitName,
270 Attr, VoidTy, PtrTy, Ty, Int8PtrTy));
271 CDSAtomicLoad[i] = checkCDSPassInterfaceFunction(
272 M.getOrInsertFunction(AtomicLoadName,
273 Attr, Ty, PtrTy, OrdTy, Int8PtrTy));
274 CDSAtomicStore[i] = checkCDSPassInterfaceFunction(
275 M.getOrInsertFunction(AtomicStoreName,
276 Attr, VoidTy, PtrTy, Ty, OrdTy, Int8PtrTy));
// Read-modify-write hooks; ops without a NamePart are left nullptr.
278 for (int op = AtomicRMWInst::FIRST_BINOP;
279 op <= AtomicRMWInst::LAST_BINOP; ++op) {
280 CDSAtomicRMW[op][i] = nullptr;
281 std::string NamePart;
283 if (op == AtomicRMWInst::Xchg)
284 NamePart = "_exchange";
285 else if (op == AtomicRMWInst::Add)
286 NamePart = "_fetch_add";
287 else if (op == AtomicRMWInst::Sub)
288 NamePart = "_fetch_sub";
289 else if (op == AtomicRMWInst::And)
290 NamePart = "_fetch_and";
291 else if (op == AtomicRMWInst::Or)
292 NamePart = "_fetch_or";
293 else if (op == AtomicRMWInst::Xor)
294 NamePart = "_fetch_xor";
298 SmallString<32> AtomicRMWName("cds_atomic" + NamePart + BitSizeStr);
299 CDSAtomicRMW[op][i] = checkCDSPassInterfaceFunction(
300 M.getOrInsertFunction(AtomicRMWName,
301 Attr, Ty, PtrTy, Ty, OrdTy, Int8PtrTy));
304 // only supports strong version
// v1 returns the old value; v2 returns i1 success and takes "expected"
// by pointer (C11 atomic_compare_exchange signature).
305 SmallString<32> AtomicCASName_V1("cds_atomic_compare_exchange" + BitSizeStr + "_v1");
306 SmallString<32> AtomicCASName_V2("cds_atomic_compare_exchange" + BitSizeStr + "_v2");
307 CDSAtomicCAS_V1[i] = checkCDSPassInterfaceFunction(
308 M.getOrInsertFunction(AtomicCASName_V1,
309 Attr, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, Int8PtrTy));
310 CDSAtomicCAS_V2[i] = checkCDSPassInterfaceFunction(
311 M.getOrInsertFunction(AtomicCASName_V2,
312 Attr, Int1Ty, PtrTy, PtrTy, Ty, OrdTy, OrdTy, Int8PtrTy));
315 CDSAtomicThreadFence = checkCDSPassInterfaceFunction(
316 M.getOrInsertFunction("cds_atomic_thread_fence", Attr, VoidTy, OrdTy, Int8PtrTy));
// libc memory intrinsics, redeclared so mem* intrinsics can be lowered to
// plain calls the runtime can intercept.
318 MemmoveFn = checkCDSPassInterfaceFunction(
319 M.getOrInsertFunction("memmove", Attr, Int8PtrTy, Int8PtrTy,
320 Int8PtrTy, IntPtrTy));
321 MemcpyFn = checkCDSPassInterfaceFunction(
322 M.getOrInsertFunction("memcpy", Attr, Int8PtrTy, Int8PtrTy,
323 Int8PtrTy, IntPtrTy));
324 MemsetFn = checkCDSPassInterfaceFunction(
325 M.getOrInsertFunction("memset", Attr, Int8PtrTy, Int8PtrTy,
329 bool CDSPass::doInitialization(Module &M) {
330 const DataLayout &DL = M.getDataLayout();
331 IntPtrTy = DL.getIntPtrType(M.getContext());
333 // createSanitizerCtorAndInitFunctions is defined in "llvm/Transforms/Utils/ModuleUtils.h"
334 // We do not support it yet
336 std::tie(CDSCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
337 M, kCDSModuleCtorName, kCDSInitName, {}, {});
339 appendToGlobalCtors(M, CDSCtorFunction, 0);
344 "atomic_init", "atomic_load", "atomic_store",
345 "atomic_fetch_", "atomic_exchange", "atomic_compare_exchange_"
348 PartialAtomicFuncNames =
350 "load", "store", "fetch", "exchange", "compare_exchange_"
356 static bool isVtableAccess(Instruction *I) {
357 if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
358 return Tag->isTBAAVtableAccess();
362 // Do not instrument known races/"benign races" that come from compiler
363 // instrumentatin. The user has no way of suppressing them.
364 static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
365 // Peel off GEPs and BitCasts.
366 Addr = Addr->stripInBoundsOffsets();
368 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
369 if (GV->hasSection()) {
370 StringRef SectionName = GV->getSection();
371 // Check if the global is in the PGO counters section.
372 auto OF = Triple(M->getTargetTriple()).getObjectFormat();
373 if (SectionName.endswith(
374 getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
378 // Check if the global is private gcov data.
379 if (GV->getName().startswith("__llvm_gcov") ||
380 GV->getName().startswith("__llvm_gcda"))
384 // Do not instrument acesses from different address spaces; we cannot deal
387 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
388 if (PtrTy->getPointerAddressSpace() != 0)
395 bool CDSPass::addrPointsToConstantData(Value *Addr) {
396 // If this is a GEP, just analyze its pointer operand.
397 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
398 Addr = GEP->getPointerOperand();
400 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
401 if (GV->isConstant()) {
402 // Reads from constant globals can not race with any writes.
403 NumOmittedReadsFromConstantGlobals++;
406 } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
407 if (isVtableAccess(L)) {
408 // Reads from a vtable pointer can not race with any writes.
409 NumOmittedReadsFromVtable++;
416 bool CDSPass::shouldInstrumentBeforeAtomics(Instruction * Inst) {
417 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
418 AtomicOrdering ordering = LI->getOrdering();
419 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
421 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
422 AtomicOrdering ordering = SI->getOrdering();
423 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
425 } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Inst)) {
426 AtomicOrdering ordering = RMWI->getOrdering();
427 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
429 } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
430 AtomicOrdering ordering = CASI->getSuccessOrdering();
431 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
433 } else if (FenceInst *FI = dyn_cast<FenceInst>(Inst)) {
434 AtomicOrdering ordering = FI->getOrdering();
435 if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
442 void CDSPass::chooseInstructionsToInstrument(
443 SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
444 const DataLayout &DL) {
445 SmallPtrSet<Value*, 8> WriteTargets;
446 // Iterate from the end.
447 for (Instruction *I : reverse(Local)) {
448 if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
449 Value *Addr = Store->getPointerOperand();
450 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
452 WriteTargets.insert(Addr);
454 LoadInst *Load = cast<LoadInst>(I);
455 Value *Addr = Load->getPointerOperand();
456 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
458 if (WriteTargets.count(Addr)) {
459 // We will write to this temp, so no reason to analyze the read.
460 NumOmittedReadsBeforeWrite++;
463 if (addrPointsToConstantData(Addr)) {
464 // Addr points to some constant data -- it can not race with any writes.
468 Value *Addr = isa<StoreInst>(*I)
469 ? cast<StoreInst>(I)->getPointerOperand()
470 : cast<LoadInst>(I)->getPointerOperand();
471 if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
472 !PointerMayBeCaptured(Addr, true, true)) {
473 // The variable is addressable but not captured, so it cannot be
474 // referenced from a different thread and participate in a data race
475 // (see llvm/Analysis/CaptureTracking.h for details).
476 NumOmittedNonCaptured++;
485 void CDSPass::InsertRuntimeIgnores(Function &F) {
486 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
487 IRB.CreateCall(CDSIgnoreBegin);
488 EscapeEnumerator EE(F, "cds_ignore_cleanup", ClHandleCxxExceptions);
489 while (IRBuilder<> *AtExit = EE.Next()) {
490 AtExit->CreateCall(CDSIgnoreEnd);
// Main per-function driver: scan every instruction, bucket it (atomic,
// atomic-library call, volatile access, plain load/store, mem intrinsic),
// then instrument each bucket. Functions containing atomics/volatiles also
// get cds_func_entry/cds_func_exit bracketing.
// NOTE(review): this listing has gaps (embedded line numbers skip) — e.g.
// the basic-block loop header, several closing braces and the final return
// are not visible here.
494 bool CDSPass::runOnFunction(Function &F) {
// The runtime provides its own main; divert the user's.
495 if (F.getName() == "main") {
496 F.setName("user_main");
497 errs() << "main replaced by user_main\n";
500 initializeCallbacks( *F.getParent() );
501 SmallVector<Instruction*, 8> AllLoadsAndStores;
// Plain accesses buffered per basic block, filtered by
// chooseInstructionsToInstrument before joining AllLoadsAndStores.
502 SmallVector<Instruction*, 8> LocalLoadsAndStores;
503 SmallVector<Instruction*, 8> VolatileLoadsAndStores;
504 SmallVector<Instruction*, 8> AtomicAccesses;
505 SmallVector<Instruction*, 8> MemIntrinCalls;
508 bool HasAtomic = false;
509 bool HasVolatile = false;
510 const DataLayout &DL = F.getParent()->getDataLayout();
513 for (auto &Inst : BB) {
514 if ( (&Inst)->isAtomic() ) {
515 AtomicAccesses.push_back(&Inst);
// Acquire-or-stronger atomics are synchronization points: flush the
// plain accesses buffered so far.
518 if (shouldInstrumentBeforeAtomics(&Inst)) {
519 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
522 } else if (isAtomicCall(&Inst) ) {
523 AtomicAccesses.push_back(&Inst);
525 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
527 } else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst)) {
528 LoadInst *LI = dyn_cast<LoadInst>(&Inst);
529 StoreInst *SI = dyn_cast<StoreInst>(&Inst);
530 bool isVolatile = ( LI ? LI->isVolatile() : SI->isVolatile() );
533 VolatileLoadsAndStores.push_back(&Inst);
536 LocalLoadsAndStores.push_back(&Inst);
537 } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
538 if (isa<MemIntrinsic>(Inst))
539 MemIntrinCalls.push_back(&Inst);
541 /*if (CallInst *CI = dyn_cast<CallInst>(&Inst))
542 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
// Calls may synchronize, so flush buffered plain accesses here too.
545 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
// End-of-block flush of whatever is still buffered.
550 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
553 for (auto Inst : AllLoadsAndStores) {
554 Res |= instrumentLoadOrStore(Inst, DL);
557 for (auto Inst : VolatileLoadsAndStores) {
558 Res |= instrumentVolatile(Inst, DL);
561 for (auto Inst : AtomicAccesses) {
562 Res |= instrumentAtomic(Inst, DL);
565 for (auto Inst : MemIntrinCalls) {
566 Res |= instrumentMemIntrinsic(Inst);
569 // Only instrument functions that contain atomics or volatiles
570 if (Res && ( HasAtomic || HasVolatile) ) {
571 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
573 Value *ReturnAddress = IRB.CreateCall(
574 Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
578 Value * FuncName = IRB.CreateGlobalStringPtr(F.getName());
579 IRB.CreateCall(CDSFuncEntry, FuncName);
// Pair the entry hook with an exit hook on every escape path.
581 EscapeEnumerator EE(F, "cds_cleanup", true);
582 while (IRBuilder<> *AtExit = EE.Next()) {
583 AtExit->CreateCall(CDSFuncExit, FuncName);
592 bool CDSPass::instrumentLoadOrStore(Instruction *I,
593 const DataLayout &DL) {
595 bool IsWrite = isa<StoreInst>(*I);
596 Value *Addr = IsWrite
597 ? cast<StoreInst>(I)->getPointerOperand()
598 : cast<LoadInst>(I)->getPointerOperand();
600 // swifterror memory addresses are mem2reg promoted by instruction selection.
601 // As such they cannot have regular uses like an instrumentation function and
602 // it makes no sense to track them as memory.
603 if (Addr->isSwiftError())
606 int Idx = getMemoryAccessFuncIndex(Addr, DL);
610 // not supported by CDS yet
611 /* if (IsWrite && isVtableAccess(I)) {
612 LLVM_DEBUG(dbgs() << " VPTR : " << *I << "\n");
613 Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
614 // StoredValue may be a vector type if we are storing several vptrs at once.
615 // In this case, just take the first element of the vector since this is
616 // enough to find vptr races.
617 if (isa<VectorType>(StoredValue->getType()))
618 StoredValue = IRB.CreateExtractElement(
619 StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
620 if (StoredValue->getType()->isIntegerTy())
621 StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
622 // Call TsanVptrUpdate.
623 IRB.CreateCall(TsanVptrUpdate,
624 {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
625 IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
626 NumInstrumentedVtableWrites++;
630 if (!IsWrite && isVtableAccess(I)) {
631 IRB.CreateCall(TsanVptrLoad,
632 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
633 NumInstrumentedVtableReads++;
638 Value *OnAccessFunc = nullptr;
639 OnAccessFunc = IsWrite ? CDSStore[Idx] : CDSLoad[Idx];
641 Type *ArgType = IRB.CreatePointerCast(Addr, Addr->getType())->getType();
643 if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
644 ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
645 // if other types of load or stores are passed in
649 IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
650 if (IsWrite) NumInstrumentedWrites++;
651 else NumInstrumentedReads++;
655 bool CDSPass::instrumentVolatile(Instruction * I, const DataLayout &DL) {
657 Value *position = getPosition(I, IRB);
659 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
660 assert( LI->isVolatile() );
661 Value *Addr = LI->getPointerOperand();
662 int Idx=getMemoryAccessFuncIndex(Addr, DL);
666 Value *args[] = {Addr, position};
667 Instruction* funcInst = CallInst::Create(CDSVolatileLoad[Idx], args);
668 ReplaceInstWithInst(LI, funcInst);
669 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
670 assert( SI->isVolatile() );
671 Value *Addr = SI->getPointerOperand();
672 int Idx=getMemoryAccessFuncIndex(Addr, DL);
676 Value *val = SI->getValueOperand();
677 Value *args[] = {Addr, val, position};
678 Instruction* funcInst = CallInst::Create(CDSVolatileStore[Idx], args);
679 ReplaceInstWithInst(SI, funcInst);
687 bool CDSPass::instrumentMemIntrinsic(Instruction *I) {
689 if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
692 {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
693 IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
694 IRB.CreateIntCast(M->getArgOperand(2), IntPtrTy, false)});
695 I->eraseFromParent();
696 } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
698 isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
699 {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
700 IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
701 IRB.CreateIntCast(M->getArgOperand(2), IntPtrTy, false)});
702 I->eraseFromParent();
// Replace an atomic IR instruction (load/store/rmw/cmpxchg/fence) with the
// matching cds_atomic_* runtime call; atomic *library calls* are delegated
// to instrumentAtomicCall.
// NOTE(review): this listing has gaps (embedded line numbers skip) — e.g.
// the IRBuilder construction, the "Idx < 0" guards after each
// getMemoryAccessFuncIndex call, and the final return are not visible here.
707 bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
710 if (auto *CI = dyn_cast<CallInst>(I)) {
711 return instrumentAtomicCall(CI, DL);
714 Value *position = getPosition(I, IRB);
716 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
717 Value *Addr = LI->getPointerOperand();
718 int Idx=getMemoryAccessFuncIndex(Addr, DL);
// Convert the IR ordering to the C ABI index the runtime expects.
722 int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
723 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
724 Value *args[] = {Addr, order, position};
725 Instruction* funcInst = CallInst::Create(CDSAtomicLoad[Idx], args);
726 ReplaceInstWithInst(LI, funcInst);
727 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
728 Value *Addr = SI->getPointerOperand();
729 int Idx=getMemoryAccessFuncIndex(Addr, DL);
733 int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
734 Value *val = SI->getValueOperand();
735 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
736 Value *args[] = {Addr, val, order, position};
737 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
738 ReplaceInstWithInst(SI, funcInst);
739 } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
740 Value *Addr = RMWI->getPointerOperand();
741 int Idx=getMemoryAccessFuncIndex(Addr, DL);
745 int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
746 Value *val = RMWI->getValOperand();
747 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
748 Value *args[] = {Addr, val, order, position};
// Hook table is indexed by [BinOp][size]; unsupported ops are nullptr.
749 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][Idx], args);
750 ReplaceInstWithInst(RMWI, funcInst);
751 } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
752 IRBuilder<> IRB(CASI);
754 Value *Addr = CASI->getPointerOperand();
755 int Idx=getMemoryAccessFuncIndex(Addr, DL);
759 const unsigned ByteSize = 1U << Idx;
760 const unsigned BitSize = ByteSize * 8;
761 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
762 Type *PtrTy = Ty->getPointerTo();
// Operands may be pointers; normalize them to the integer width.
764 Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
765 Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
767 int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
768 int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
769 Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
770 Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);
772 Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
773 CmpOperand, NewOperand,
774 order_succ, order_fail, position};
// v1 hook returns the old value; success is recomputed by comparison.
776 CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS_V1[Idx], Args);
777 Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);
779 Value *OldVal = funcInst;
780 Type *OrigOldValTy = CASI->getNewValOperand()->getType();
781 if (Ty != OrigOldValTy) {
782 // The value is a pointer, so we need to cast the return value.
783 OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
// Rebuild cmpxchg's {old value, success} aggregate result.
787 IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
788 Res = IRB.CreateInsertValue(Res, Success, 1);
790 I->replaceAllUsesWith(Res);
791 I->eraseFromParent();
792 } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
793 int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
794 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
795 Value *Args[] = {order, position};
797 CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
798 ReplaceInstWithInst(FI, funcInst);
799 // errs() << "Thread Fences replaced\n";
804 bool CDSPass::isAtomicCall(Instruction *I) {
805 if ( auto *CI = dyn_cast<CallInst>(I) ) {
806 Function *fun = CI->getCalledFunction();
810 StringRef funName = fun->getName();
812 // TODO: come up with better rules for function name checking
813 for (StringRef name : AtomicFuncNames) {
814 if ( funName.contains(name) )
818 for (StringRef PartialName : PartialAtomicFuncNames) {
819 if (funName.contains(PartialName) &&
820 funName.contains("atomic") )
// Rewrite a recognized atomic-library call (atomic_init / load / store /
// fetch_* / exchange / compare_exchange_*) into the matching cds_atomic_*
// runtime call, translating explicit memory-order arguments or defaulting
// to seq_cst for the non-_explicit forms.
// NOTE(review): this listing has gaps (embedded line numbers skip) — the
// IRBuilder construction, several else-branches, return statements and
// closing braces are not visible here.
828 bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
830 Function *fun = CI->getCalledFunction();
831 StringRef funName = fun->getName();
// Snapshot the call arguments for positional access below.
832 std::vector<Value *> parameters;
834 User::op_iterator begin = CI->arg_begin();
835 User::op_iterator end = CI->arg_end();
836 for (User::op_iterator it = begin; it != end; ++it) {
838 parameters.push_back(param);
841 // obtain source line number of the CallInst
842 Value *position = getPosition(CI, IRB);
844 // the pointer to the address is always the first argument
845 Value *OrigPtr = parameters[0];
847 int Idx = getMemoryAccessFuncIndex(OrigPtr, DL);
// Integer type of the accessed width; operands are normalized to it.
851 const unsigned ByteSize = 1U << Idx;
852 const unsigned BitSize = ByteSize * 8;
853 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
854 Type *PtrTy = Ty->getPointerTo();
856 // atomic_init; args = {obj, order}
857 if (funName.contains("atomic_init")) {
858 Value *OrigVal = parameters[1];
860 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
// Pointer payloads are pointer-cast; integers are sign-extended/truncated.
862 if (OrigVal->getType()->isPtrOrPtrVectorTy())
863 val = IRB.CreatePointerCast(OrigVal, Ty);
865 val = IRB.CreateIntCast(OrigVal, Ty, true);
867 Value *args[] = {ptr, val, position};
869 Instruction* funcInst = CallInst::Create(CDSAtomicInit[Idx], args);
870 ReplaceInstWithInst(CI, funcInst);
875 // atomic_load; args = {obj, order}
876 if (funName.contains("atomic_load")) {
// _explicit variants carry the order as an argument; otherwise seq_cst.
877 bool isExplicit = funName.contains("atomic_load_explicit");
879 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
882 order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
884 order = ConstantInt::get(OrdTy,
885 (int) AtomicOrderingCABI::seq_cst);
886 Value *args[] = {ptr, order, position};
888 Instruction* funcInst = CallInst::Create(CDSAtomicLoad[Idx], args);
889 ReplaceInstWithInst(CI, funcInst);
// Mangled C++ std::atomic load (name contains both "atomic" and "load").
892 } else if (funName.contains("atomic") &&
893 funName.contains("load") ) {
894 // does this version of call always have an atomic order as an argument?
895 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
896 Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
897 Value *args[] = {ptr, order, position};
899 if (!CI->getType()->isPointerTy()) {
// Pointer-returning load: call the hook, cast the integer result back.
903 CallInst *funcInst = IRB.CreateCall(CDSAtomicLoad[Idx], args);
904 Value *RetVal = IRB.CreateIntToPtr(funcInst, CI->getType());
906 CI->replaceAllUsesWith(RetVal);
907 CI->eraseFromParent();
912 // atomic_store; args = {obj, val, order}
913 if (funName.contains("atomic_store")) {
914 bool isExplicit = funName.contains("atomic_store_explicit");
915 Value *OrigVal = parameters[1];
917 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
918 Value *val = IRB.CreatePointerCast(OrigVal, Ty);
921 order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
923 order = ConstantInt::get(OrdTy,
924 (int) AtomicOrderingCABI::seq_cst);
925 Value *args[] = {ptr, val, order, position};
927 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
928 ReplaceInstWithInst(CI, funcInst);
// Mangled C++ std::atomic store.
931 } else if (funName.contains("atomic") &&
932 funName.contains("store") ) {
933 // does this version of call always have an atomic order as an argument?
934 Value *OrigVal = parameters[1];
936 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
938 if (OrigVal->getType()->isPtrOrPtrVectorTy())
939 val = IRB.CreatePointerCast(OrigVal, Ty);
941 val = IRB.CreateIntCast(OrigVal, Ty, true);
943 Value *order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
944 Value *args[] = {ptr, val, order, position};
946 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
947 ReplaceInstWithInst(CI, funcInst);
952 // atomic_fetch_*; args = {obj, val, order}
953 if (funName.contains("atomic_fetch_") ||
954 funName.contains("atomic_exchange")) {
956 /* TODO: implement stricter function name checking */
// Skip "...non..." lookalikes caught by the loose substring match.
957 if (funName.contains("non"))
960 bool isExplicit = funName.contains("_explicit");
961 Value *OrigVal = parameters[1];
// Map the library function name to the RMW hook-table operation.
964 if ( funName.contains("_fetch_add") )
965 op = AtomicRMWInst::Add;
966 else if ( funName.contains("_fetch_sub") )
967 op = AtomicRMWInst::Sub;
968 else if ( funName.contains("_fetch_and") )
969 op = AtomicRMWInst::And;
970 else if ( funName.contains("_fetch_or") )
971 op = AtomicRMWInst::Or;
972 else if ( funName.contains("_fetch_xor") )
973 op = AtomicRMWInst::Xor;
974 else if ( funName.contains("atomic_exchange") )
975 op = AtomicRMWInst::Xchg;
977 errs() << "Unknown atomic read-modify-write operation\n";
981 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
983 if (OrigVal->getType()->isPtrOrPtrVectorTy())
984 val = IRB.CreatePointerCast(OrigVal, Ty);
986 val = IRB.CreateIntCast(OrigVal, Ty, true);
990 order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
992 order = ConstantInt::get(OrdTy,
993 (int) AtomicOrderingCABI::seq_cst);
994 Value *args[] = {ptr, val, order, position};
996 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[op][Idx], args);
997 ReplaceInstWithInst(CI, funcInst);
// Unhandled fetch-style calls: diagnose with source position, leave as-is.
1000 } else if (funName.contains("fetch")) {
1001 errs() << "atomic fetch captured. Not implemented yet. ";
1002 errs() << "See source file :";
1003 getPosition(CI, IRB, true);
1005 } else if (funName.contains("exchange") &&
1006 !funName.contains("compare_exchange") ) {
1007 if (CI->getType()->isPointerTy()) {
1008 // Can not deal with this now
1009 errs() << "atomic exchange captured. Not implemented yet. ";
1010 errs() << "See source file :";
1011 getPosition(CI, IRB, true);
1016 Value *OrigVal = parameters[1];
1018 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
1020 if (OrigVal->getType()->isPtrOrPtrVectorTy())
1021 val = IRB.CreatePointerCast(OrigVal, Ty);
1023 val = IRB.CreateIntCast(OrigVal, Ty, true);
1025 Value *order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
1026 Value *args[] = {ptr, val, order, position};
1027 int op = AtomicRMWInst::Xchg;
1029 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[op][Idx], args);
1030 ReplaceInstWithInst(CI, funcInst);
1033 /* atomic_compare_exchange_*;
1034 args = {obj, expected, new value, order1, order2}
// C11 CAS: "expected" is passed by pointer, so the bool-returning v2 hook
// is used; a missing failure order is derived via AtomicCasFailureOrderIndex.
1036 if ( funName.contains("atomic_compare_exchange_") ) {
1037 bool isExplicit = funName.contains("_explicit");
1039 Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
1040 Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
1041 Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
1043 Value *order_succ, *order_fail;
1045 order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
1047 if (parameters.size() > 4) {
1048 order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
1050 /* The failure order is not provided */
1051 order_fail = order_succ;
1052 ConstantInt * order_succ_cast = dyn_cast<ConstantInt>(order_succ);
1053 int index = order_succ_cast->getSExtValue();
1055 order_fail = ConstantInt::get(OrdTy,
1056 AtomicCasFailureOrderIndex(index));
1059 order_succ = ConstantInt::get(OrdTy,
1060 (int) AtomicOrderingCABI::seq_cst);
1061 order_fail = ConstantInt::get(OrdTy,
1062 (int) AtomicOrderingCABI::seq_cst);
1065 Value *args[] = {Addr, CmpOperand, NewOperand,
1066 order_succ, order_fail, position};
1068 Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
1069 ReplaceInstWithInst(CI, funcInst);
// C++ std::atomic compare_exchange_strong/_weak (same v2 hook).
1072 } else if ( funName.contains("compare_exchange_strong") ||
1073 funName.contains("compare_exchange_weak") ) {
1074 Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
1075 Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
1076 Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
1078 Value *order_succ, *order_fail;
1079 order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
1081 if (parameters.size() > 4) {
1082 order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
1084 /* The failure order is not provided */
1085 order_fail = order_succ;
1086 ConstantInt * order_succ_cast = dyn_cast<ConstantInt>(order_succ);
1087 int index = order_succ_cast->getSExtValue();
1089 order_fail = ConstantInt::get(OrdTy,
1090 AtomicCasFailureOrderIndex(index));
1093 Value *args[] = {Addr, CmpOperand, NewOperand,
1094 order_succ, order_fail, position};
1095 Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
1096 ReplaceInstWithInst(CI, funcInst);
1104 int CDSPass::getMemoryAccessFuncIndex(Value *Addr,
1105 const DataLayout &DL) {
1106 Type *OrigPtrTy = Addr->getType();
1107 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
1108 assert(OrigTy->isSized());
1109 uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
1110 if (TypeSize != 8 && TypeSize != 16 &&
1111 TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
1112 NumAccessesWithBadSize++;
1113 // Ignore all unusual sizes.
1116 size_t Idx = countTrailingZeros(TypeSize / 8);
1117 //assert(Idx < kNumberOfAccessSizes);
1118 if (Idx >= kNumberOfAccessSizes) {
1125 char CDSPass::ID = 0;
1127 // Automatically enable the pass.
1128 static void registerCDSPass(const PassManagerBuilder &,
1129 legacy::PassManagerBase &PM) {
1130 PM.add(new CDSPass());
1133 /* Enable the pass when opt level is greater than 0 */
1134 static RegisterStandardPasses
1135 RegisterMyPass1(PassManagerBuilder::EP_OptimizerLast,
1138 /* Enable the pass when opt level is 0 */
1139 static RegisterStandardPasses
1140 RegisterMyPass2(PassManagerBuilder::EP_EnabledOnOptLevel0,