From: weiyu
Date: Fri, 21 Jun 2019 19:13:45 +0000 (-0700)
Subject: make indentations consistent (all tabs)
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=c11llvm.git;a=commitdiff_plain;h=fdc53cf9df7487a430e17f0ff99310f6afc257d8;ds=inline

make indentations consistent (all tabs)
---

diff --git a/CDSPass.cpp b/CDSPass.cpp
index 0cebdf0..7546be0 100644
--- a/CDSPass.cpp
+++ b/CDSPass.cpp
@@ -49,7 +49,7 @@ using namespace llvm;
 #define DEBUG_TYPE "CDS"
 #include

-Value *getPosition( Instruction * I, IRBuilder <> IRB)
+Value *getPosition( Instruction * I, IRBuilder <> IRB, bool print = false)
 {
 	const DebugLoc & debug_location = I->getDebugLoc ();
 	std::string position_string;
@@ -58,6 +58,10 @@ Value *getPosition( Instruction * I, IRBuilder <> IRB)
 		debug_location . print (position_stream);
 	}

+	if (print) {
+		errs() << position_string;
+	}
+
 	return IRB . CreateGlobalStringPtr (position_string);
 }

@@ -84,59 +88,65 @@ Type * Int64PtrTy;
 Type * VoidTy;

 static const size_t kNumberOfAccessSizes = 4;
-Constant * CDSLoad[kNumberOfAccessSizes];
-Constant * CDSStore[kNumberOfAccessSizes];
-Constant * CDSAtomicInit[kNumberOfAccessSizes];
-Constant * CDSAtomicLoad[kNumberOfAccessSizes];
-Constant * CDSAtomicStore[kNumberOfAccessSizes];
-Constant * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
-Constant * CDSAtomicCAS_V1[kNumberOfAccessSizes];
-Constant * CDSAtomicCAS_V2[kNumberOfAccessSizes];
-Constant * CDSAtomicThreadFence;

 int getAtomicOrderIndex(AtomicOrdering order){
-	switch (order) {
-	case AtomicOrdering::Monotonic:
-		return (int)AtomicOrderingCABI::relaxed;
-//	case AtomicOrdering::Consume:		// not specified yet
-//		return AtomicOrderingCABI::consume;
-	case AtomicOrdering::Acquire:
-		return (int)AtomicOrderingCABI::acquire;
-	case AtomicOrdering::Release:
-		return (int)AtomicOrderingCABI::release;
-	case AtomicOrdering::AcquireRelease:
-		return (int)AtomicOrderingCABI::acq_rel;
-	case AtomicOrdering::SequentiallyConsistent:
-		return (int)AtomicOrderingCABI::seq_cst;
-	default:
-		// unordered or Not Atomic
-		return -1;
-	}
+	switch (order) {
+		case AtomicOrdering::Monotonic:
+			return (int)AtomicOrderingCABI::relaxed;
+		// case AtomicOrdering::Consume:	// not specified yet
+		//	return AtomicOrderingCABI::consume;
+		case AtomicOrdering::Acquire:
+			return (int)AtomicOrderingCABI::acquire;
+		case AtomicOrdering::Release:
+			return (int)AtomicOrderingCABI::release;
+		case AtomicOrdering::AcquireRelease:
+			return (int)AtomicOrderingCABI::acq_rel;
+		case AtomicOrdering::SequentiallyConsistent:
+			return (int)AtomicOrderingCABI::seq_cst;
+		default:
+			// unordered or Not Atomic
+			return -1;
+	}
 }

 namespace {
-	struct CDSPass : public FunctionPass {
-		static char ID;
-		CDSPass() : FunctionPass(ID) {}
-		bool runOnFunction(Function &F) override;
-
-	private:
-		void initializeCallbacks(Module &M);
-		bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
-		bool instrumentAtomic(Instruction *I, const DataLayout &DL);
-		bool instrumentAtomicCall(CallInst *CI, const DataLayout &DL);
-		void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
-											SmallVectorImpl<Instruction *> &All,
-											const DataLayout &DL);
-		bool addrPointsToConstantData(Value *Addr);
-		int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
-	};
+	struct CDSPass : public FunctionPass {
+		static char ID;
+		CDSPass() : FunctionPass(ID) {}
+		bool runOnFunction(Function &F) override;
+
+	private:
+		void initializeCallbacks(Module &M);
+		bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
+		bool isAtomicCall(Instruction *I);
+		bool instrumentAtomic(Instruction *I, const DataLayout &DL);
+		bool instrumentAtomicCall(CallInst *CI, const DataLayout &DL);
+		void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
+											SmallVectorImpl<Instruction *> &All,
+											const DataLayout &DL);
+		bool addrPointsToConstantData(Value *Addr);
+		int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
+
+		// Callbacks to run-time library are computed in doInitialization.
+		Constant * CDSFuncEntry;
+		Constant * CDSFuncExit;
+
+		Constant * CDSLoad[kNumberOfAccessSizes];
+		Constant * CDSStore[kNumberOfAccessSizes];
+		Constant * CDSAtomicInit[kNumberOfAccessSizes];
+		Constant * CDSAtomicLoad[kNumberOfAccessSizes];
+		Constant * CDSAtomicStore[kNumberOfAccessSizes];
+		Constant * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
+		Constant * CDSAtomicCAS_V1[kNumberOfAccessSizes];
+		Constant * CDSAtomicCAS_V2[kNumberOfAccessSizes];
+		Constant * CDSAtomicThreadFence;
+	};
 }

 static bool isVtableAccess(Instruction *I) {
-	if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
-		return Tag->isTBAAVtableAccess();
-	return false;
+	if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
+		return Tag->isTBAAVtableAccess();
+	return false;
 }

 void CDSPass::initializeCallbacks(Module &M) {
@@ -151,7 +161,7 @@ void CDSPass::initializeCallbacks(Module &M) {
 	Int64PtrTy = Type::getInt64PtrTy(Ctx);

 	VoidTy = Type::getVoidTy(Ctx);
-
+
 	// Get the function to call from our runtime library.
 	for (unsigned i = 0; i < kNumberOfAccessSizes; i++) {
 		const unsigned ByteSize = 1U << i;
@@ -218,43 +228,317 @@ void CDSPass::initializeCallbacks(Module &M) {
 							VoidTy, OrdTy, Int8PtrTy);
 }

-void printArgs(CallInst *);
+static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
+	// Peel off GEPs and BitCasts.
+	Addr = Addr->stripInBoundsOffsets();
+
+	if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
+		if (GV->hasSection()) {
+			StringRef SectionName = GV->getSection();
+			// Check if the global is in the PGO counters section.
+			auto OF = Triple(M->getTargetTriple()).getObjectFormat();
+			if (SectionName.endswith(
+					getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
+				return false;
+		}
+
+		// Check if the global is private gcov data.
+		if (GV->getName().startswith("__llvm_gcov") ||
+			GV->getName().startswith("__llvm_gcda"))
+			return false;
+	}

-bool isAtomicCall(Instruction *I) {
-	if ( auto *CI = dyn_cast<CallInst>(I) ) {
-		Function *fun = CI->getCalledFunction();
-		if (fun == NULL)
+	// Do not instrument accesses from different address spaces; we cannot deal
+	// with them.
+	if (Addr) {
+		Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
+		if (PtrTy->getPointerAddressSpace() != 0)
 			return false;
+	}

-	StringRef funName = fun->getName();
-	// todo: come up with better rules for function name checking
-	if ( funName.contains("atomic_") ) {
-		return true;
-	} else if (funName.contains("atomic") ) {
-		return true;
+	return true;
+}
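Throughout the patch, the order value handed to the runtime is the C-ABI index produced by getAtomicOrderIndex() above; LLVM's AtomicOrderingCABI follows the C11 memory_order numbering. A minimal standalone sketch of that mapping (illustration only, not part of the patch):

	#include <cstdio>

	// Mirrors llvm::AtomicOrderingCABI; the values are fixed by the C ABI.
	enum class AtomicOrderingCABI { relaxed = 0, consume = 1, acquire = 2,
	                                release = 3, acq_rel = 4, seq_cst = 5 };

	int main() {
		// A relaxed atomic op reaches the runtime as order 0, a seq_cst one
		// as order 5; non-atomic orderings make the pass return -1 instead.
		std::printf("relaxed=%d seq_cst=%d\n",
		            (int)AtomicOrderingCABI::relaxed,
		            (int)AtomicOrderingCABI::seq_cst);
		return 0;
	}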
+
+bool CDSPass::addrPointsToConstantData(Value *Addr) {
+	// If this is a GEP, just analyze its pointer operand.
+	if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
+		Addr = GEP->getPointerOperand();
+
+	if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
+		if (GV->isConstant()) {
+			// Reads from constant globals can not race with any writes.
+			NumOmittedReadsFromConstantGlobals++;
+			return true;
+		}
+	} else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
+		if (isVtableAccess(L)) {
+			// Reads from a vtable pointer can not race with any writes.
+			NumOmittedReadsFromVtable++;
+			return true;
+		}
+	}
+	return false;
+}
+
+bool CDSPass::runOnFunction(Function &F) {
+	if (F.getName() == "main") {
+		F.setName("user_main");
+		errs() << "main replaced by user_main\n";
+	}
+
+	if (true) {
+		initializeCallbacks( *F.getParent() );
+
+		SmallVector<Instruction *, 8> AllLoadsAndStores;
+		SmallVector<Instruction *, 8> LocalLoadsAndStores;
+		SmallVector<Instruction *, 8> AtomicAccesses;
+
+		std::vector<Instruction *> worklist;
+
+		bool Res = false;
+		const DataLayout &DL = F.getParent()->getDataLayout();
+
+		// errs() << "--- " << F.getName() << "---\n";
+
+		for (auto &B : F) {
+			for (auto &I : B) {
+				if ( (&I)->isAtomic() || isAtomicCall(&I) ) {
+					AtomicAccesses.push_back(&I);
+				} else if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
+					LocalLoadsAndStores.push_back(&I);
+				} else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
+					// not implemented yet
+				}
+			}
+
+			chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
+		}
+
+		for (auto Inst : AllLoadsAndStores) {
+			// Res |= instrumentLoadOrStore(Inst, DL);
+			// errs() << "load and store are replaced\n";
+		}
+
+		for (auto Inst : AtomicAccesses) {
+			Res |= instrumentAtomic(Inst, DL);
+		}
+
+		if (F.getName() == "user_main") {
+			// F.dump();
 		}
 	}

 	return false;
 }

-void printArgs (CallInst *CI) {
-	Function *fun = CI->getCalledFunction();
-	StringRef funName = fun->getName();
+void CDSPass::chooseInstructionsToInstrument(
+	SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
+	const DataLayout &DL) {
+	SmallPtrSet<Value *, 8> WriteTargets;
+	// Iterate from the end.
+	for (Instruction *I : reverse(Local)) {
+		if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
+			Value *Addr = Store->getPointerOperand();
+			if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
+				continue;
+			WriteTargets.insert(Addr);
+		} else {
+			LoadInst *Load = cast<LoadInst>(I);
+			Value *Addr = Load->getPointerOperand();
+			if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
+				continue;
+			if (WriteTargets.count(Addr)) {
+				// We will write to this temp, so no reason to analyze the read.
+				NumOmittedReadsBeforeWrite++;
+				continue;
+			}
+			if (addrPointsToConstantData(Addr)) {
+				// Addr points to some constant data -- it can not race with any writes.
+				continue;
+			}
+		}
+		Value *Addr = isa<StoreInst>(*I)
+			? cast<StoreInst>(I)->getPointerOperand()
+			: cast<LoadInst>(I)->getPointerOperand();
+		if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
+				!PointerMayBeCaptured(Addr, true, true)) {
+			// The variable is addressable but not captured, so it cannot be
+			// referenced from a different thread and participate in a data race
+			// (see llvm/Analysis/CaptureTracking.h for details).
+			NumOmittedNonCaptured++;
+			continue;
+		}
+		All.push_back(I);
+	}
+	Local.clear();
+}
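The capture check in chooseInstructionsToInstrument() above is what keeps provably thread-local plain accesses out of All. A source-level illustration of the two cases, assuming the pass is run over this code (names are made up):

	// Illustration only.
	int shared_counter;              // global: its accesses stay in AllLoadsAndStores

	int sum_ten() {
		int local = 0;               // alloca whose address never escapes: its
		for (int i = 0; i < 10; ++i) // loads/stores are dropped via
			local += shared_counter; // PointerMayBeCaptured (NumOmittedNonCaptured)
		return local;
	}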
-	User::op_iterator begin = CI->arg_begin();
-	User::op_iterator end = CI->arg_end();
-
-	if ( funName.contains("atomic_") ) {
-		std::vector<Value *> parameters;

+bool CDSPass::instrumentLoadOrStore(Instruction *I,
+									const DataLayout &DL) {
+	IRBuilder<> IRB(I);
+	bool IsWrite = isa<StoreInst>(*I);
+	Value *Addr = IsWrite
+		? cast<StoreInst>(I)->getPointerOperand()
+		: cast<LoadInst>(I)->getPointerOperand();
+
+	// swifterror memory addresses are mem2reg promoted by instruction selection.
+	// As such they cannot have regular uses like an instrumentation function and
+	// it makes no sense to track them as memory.
+	if (Addr->isSwiftError())
+		return false;
+
+	int Idx = getMemoryAccessFuncIndex(Addr, DL);
-
-		for (User::op_iterator it = begin; it != end; ++it) {
-			Value *param = *it;
-			parameters.push_back(param);
-			errs() << *param << " type: " << *param->getType() << "\n";
+// not supported by CDS yet
+/*	if (IsWrite && isVtableAccess(I)) {
+		LLVM_DEBUG(dbgs() << "  VPTR : " << *I << "\n");
+		Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
+		// StoredValue may be a vector type if we are storing several vptrs at once.
+		// In this case, just take the first element of the vector since this is
+		// enough to find vptr races.
+		if (isa<VectorType>(StoredValue->getType()))
+			StoredValue = IRB.CreateExtractElement(
+					StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
+		if (StoredValue->getType()->isIntegerTy())
+			StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
+		// Call TsanVptrUpdate.
+		IRB.CreateCall(TsanVptrUpdate,
+					{IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
+					IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
+		NumInstrumentedVtableWrites++;
+		return true;
+	}
+
+	if (!IsWrite && isVtableAccess(I)) {
+		IRB.CreateCall(TsanVptrLoad,
+					IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
+		NumInstrumentedVtableReads++;
+		return true;
+	}
+*/
+
+	Value *OnAccessFunc = nullptr;
+	OnAccessFunc = IsWrite ? CDSStore[Idx] : CDSLoad[Idx];
+
+	Type *ArgType = IRB.CreatePointerCast(Addr, Addr->getType())->getType();
+
+	if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
+			ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
+		//errs() << "A load or store of type ";
+		//errs() << *ArgType;
+		//errs() << " is passed in\n";
+		return false;	// if other types of load or stores are passed in
+	}
+	IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
+	if (IsWrite) NumInstrumentedWrites++;
+	else NumInstrumentedReads++;
+	return true;
+}
+
+bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
+	IRBuilder<> IRB(I);
+	// LLVMContext &Ctx = IRB.getContext();
+
+	if (auto *CI = dyn_cast<CallInst>(I)) {
+		return instrumentAtomicCall(CI, DL);
+	}
+
+	Value *position = getPosition(I, IRB);
+
+	if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+		Value *Addr = LI->getPointerOperand();
+		int Idx=getMemoryAccessFuncIndex(Addr, DL);
+		int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
+		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
+		Value *args[] = {Addr, order, position};
+		Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
+		ReplaceInstWithInst(LI, funcInst);
+	} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+		Value *Addr = SI->getPointerOperand();
+		int Idx=getMemoryAccessFuncIndex(Addr, DL);
+		int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
+		Value *val = SI->getValueOperand();
+		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
+		Value *args[] = {Addr, val, order, position};
+		Instruction* funcInst=CallInst::Create(CDSAtomicStore[Idx], args);
+		ReplaceInstWithInst(SI, funcInst);
+	} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
+		Value *Addr = RMWI->getPointerOperand();
+		int Idx=getMemoryAccessFuncIndex(Addr, DL);
+		int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
+		Value *val = RMWI->getValOperand();
+		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
+		Value *args[] = {Addr, val, order, position};
+		Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][Idx], args);
+		ReplaceInstWithInst(RMWI, funcInst);
+	} else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
+		IRBuilder<> IRB(CASI);
+
+		Value *Addr = CASI->getPointerOperand();
+		int Idx=getMemoryAccessFuncIndex(Addr, DL);
+
+		const unsigned ByteSize = 1U << Idx;
+		const unsigned BitSize = ByteSize * 8;
+		Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
+		Type *PtrTy = Ty->getPointerTo();
+
+		Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
+		Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
+
+		int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
+		int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
+		Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
+		Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);
+
+		Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
+						CmpOperand, NewOperand,
+						order_succ, order_fail, position};
+
+		CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS_V1[Idx], Args);
+		Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);
+
+		Value *OldVal = funcInst;
+		Type *OrigOldValTy = CASI->getNewValOperand()->getType();
+		if (Ty != OrigOldValTy) {
+			// The value is a pointer, so we need to cast the return value.
+			OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
+		}
+
+		Value *Res =
+			IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
+		Res = IRB.CreateInsertValue(Res, Success, 1);
+
+		I->replaceAllUsesWith(Res);
+		I->eraseFromParent();
+	} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
+		int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
+		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
+		Value *Args[] = {order, position};
+
+		CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
+		ReplaceInstWithInst(FI, funcInst);
+		// errs() << "Thread Fences replaced\n";
+	}
+	return true;
+}
+
+bool CDSPass::isAtomicCall(Instruction *I) {
+	if ( auto *CI = dyn_cast<CallInst>(I) ) {
+		Function *fun = CI->getCalledFunction();
+		if (fun == NULL)
+			return false;
+
+		StringRef funName = fun->getName();
+		// todo: come up with better rules for function name checking
+		if ( funName.contains("atomic_") ) {
+			return true;
+		} else if (funName.contains("atomic") ) {
+			return true;
+		}
+	}
+
+	return false;
 }
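instrumentAtomic() above rewrites each atomic instruction into a call into the CDS runtime, passing the address, the C-ABI order index, and the source-position string from getPosition(). The real symbol names are bound in initializeCallbacks() and live in the runtime library; the stand-ins below are hypothetical, chosen only to show the shape of the replacement:

	#include <cstdint>
	#include <cstdio>

	// Hypothetical stand-in for a 4-byte atomic-load callback.
	extern "C" uint32_t cds_atomic_load32(void *addr, int order, const char *position) {
		std::printf("atomic load %p order=%d at %s\n", addr, order, position);
		return *static_cast<uint32_t *>(addr);   // stub behaviour only
	}

	// After ReplaceInstWithInst, an i32 seq_cst atomic load of x behaves
	// roughly as if the source had been written like this:
	uint32_t instrumented_load(uint32_t *x) {
		return cds_atomic_load32(x, /*order=*/5, /*position=*/"example.cc:42");
	}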
 bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
@@ -290,7 +574,7 @@ bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
 		Value *val = IRB.CreateBitOrPointerCast(parameters[1], Ty);
 		Value *args[] = {ptr, val, position};

-		Instruction* funcInst=CallInst::Create(CDSAtomicInit[Idx], args);
+		Instruction* funcInst = CallInst::Create(CDSAtomicInit[Idx], args);
 		ReplaceInstWithInst(CI, funcInst);

 		return true;
@@ -309,26 +593,26 @@ bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
 						(int) AtomicOrderingCABI::seq_cst);
 		Value *args[] = {ptr, order, position};

-		Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
+		Instruction* funcInst = CallInst::Create(CDSAtomicLoad[Idx], args);
 		ReplaceInstWithInst(CI, funcInst);

 		return true;
 	} else if (funName.contains("atomic") &&
-			funName.contains("load")) {
-		// does this version of call always have an atomic order as an argument?
-		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
-		Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
-		Value *args[] = {ptr, order, position};
-
-		//Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
-		CallInst *funcInst = IRB.CreateCall(CDSAtomicLoad[Idx], args);
-		Value *RetVal = IRB.CreateIntToPtr(funcInst, CI->getType());
-
-		CI->replaceAllUsesWith(RetVal);
-		CI->eraseFromParent();
-
-		return true;
-	}
+			funName.contains("load")) {
+		// does this version of call always have an atomic order as an argument?
+		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
+		Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
+		Value *args[] = {ptr, order, position};
+
+		//Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
+		CallInst *funcInst = IRB.CreateCall(CDSAtomicLoad[Idx], args);
+		Value *RetVal = IRB.CreateIntToPtr(funcInst, CI->getType());
+
+		CI->replaceAllUsesWith(RetVal);
+		CI->eraseFromParent();
+
+		return true;
+	}

 	// atomic_store; args = {obj, val, order}
 	if (funName.contains("atomic_store")) {
@@ -345,26 +629,25 @@ bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
 						(int) AtomicOrderingCABI::seq_cst);
 		Value *args[] = {ptr, val, order, position};

-		Instruction* funcInst=CallInst::Create(CDSAtomicStore[Idx], args);
+		Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
 		ReplaceInstWithInst(CI, funcInst);

 		return true;
 	} else if (funName.contains("atomic") &&
-			funName.contains("EEEE5store")) {
-		// does this version of call always have an atomic order as an argument?
-		Value *OrigVal = parameters[1];
-
-		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
-		Value *val = IRB.CreatePointerCast(OrigVal, Ty);
-		Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
-		Value *args[] = {ptr, val, order, position};
+			funName.contains("EEEE5store")) {
+		// does this version of call always have an atomic order as an argument?
+		Value *OrigVal = parameters[1];

-		Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
-		ReplaceInstWithInst(CI, funcInst);
+		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
+		Value *val = IRB.CreatePointerCast(OrigVal, Ty);
+		Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
+		Value *args[] = {ptr, val, order, position};

-		return true;
-	}
+		Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
+		ReplaceInstWithInst(CI, funcInst);
+
+		return true;
+	}

 	// atomic_fetch_*; args = {obj, val, order}
 	if (funName.contains("atomic_fetch_") ||
@@ -400,20 +683,20 @@ bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
 						(int) AtomicOrderingCABI::seq_cst);
 		Value *args[] = {ptr, val, order, position};

-		Instruction* funcInst=CallInst::Create(CDSAtomicRMW[op][Idx], args);
+		Instruction* funcInst = CallInst::Create(CDSAtomicRMW[op][Idx], args);
 		ReplaceInstWithInst(CI, funcInst);

 		return true;
 	} else if (funName.contains("fetch")) {
-		errs() << "atomic exchange captured. Not implemented yet. ";
-		errs() << "See source file :";
-		getPositionPrint(CI, IRB);
-	} else if (funName.contains("exchange") &&
-			!funName.contains("compare_exchange") ) {
-		errs() << "atomic exchange captured. Not implemented yet. ";
-		errs() << "See source file :";
-		getPositionPrint(CI, IRB);
-	}
+		errs() << "atomic exchange captured. Not implemented yet. ";
+		errs() << "See source file :";
+		getPosition(CI, IRB, true);
+	} else if (funName.contains("exchange") &&
+			!funName.contains("compare_exchange") ) {
+		errs() << "atomic exchange captured. Not implemented yet. ";
+		errs() << "See source file :";
+		getPosition(CI, IRB, true);
+	}

 	/* atomic_compare_exchange_*;
 	   args = {obj, expected, new value, order1, order2}
 	*/
@@ -439,344 +722,46 @@ bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
 		Value *args[] = {Addr, CmpOperand, NewOperand,
 						order_succ, order_fail, position};

-		Instruction* funcInst=CallInst::Create(CDSAtomicCAS_V2[Idx], args);
+		Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
 		ReplaceInstWithInst(CI, funcInst);

 		return true;
 	} else if ( funName.contains("compare_exchange_strong") ||
-			funName.contains("compare_exchange_weak") ) {
-		Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
-		Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
-		Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
-
-		Value *order_succ, *order_fail;
-		order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
-		order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
+			funName.contains("compare_exchange_weak") ) {
+		Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
+		Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
+		Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);

-		Value *args[] = {Addr, CmpOperand, NewOperand,
-						order_succ, order_fail, position};
-		Instruction* funcInst=CallInst::Create(CDSAtomicCAS_V2[Idx], args);
-		ReplaceInstWithInst(CI, funcInst);
+		Value *order_succ, *order_fail;
+		order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
+		order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);

-		return true;
-	}
+		Value *args[] = {Addr, CmpOperand, NewOperand,
+						order_succ, order_fail, position};
+		Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
+		ReplaceInstWithInst(CI, funcInst);
+
+		return true;
+	}

 	return false;
 }

-static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
-	// Peel off GEPs and BitCasts.
-	Addr = Addr->stripInBoundsOffsets();
-
-	if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
-		if (GV->hasSection()) {
-			StringRef SectionName = GV->getSection();
-			// Check if the global is in the PGO counters section.
-			auto OF = Triple(M->getTargetTriple()).getObjectFormat();
-			if (SectionName.endswith(
-					getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
-				return false;
-		}
-
-		// Check if the global is private gcov data.
-		if (GV->getName().startswith("__llvm_gcov") ||
-			GV->getName().startswith("__llvm_gcda"))
-			return false;
-	}
-
-	// Do not instrument accesses from different address spaces; we cannot deal
-	// with them.
-	if (Addr) {
-		Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
-		if (PtrTy->getPointerAddressSpace() != 0)
-			return false;
-	}
-
-	return true;
-}
-
-bool CDSPass::addrPointsToConstantData(Value *Addr) {
-	// If this is a GEP, just analyze its pointer operand.
-	if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
-		Addr = GEP->getPointerOperand();
-
-	if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
-		if (GV->isConstant()) {
-			// Reads from constant globals can not race with any writes.
-			NumOmittedReadsFromConstantGlobals++;
-			return true;
-		}
-	} else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
-		if (isVtableAccess(L)) {
-			// Reads from a vtable pointer can not race with any writes.
-			NumOmittedReadsFromVtable++;
-			return true;
-		}
-	}
-	return false;
-}
-
-bool CDSPass::runOnFunction(Function &F) {
-	if (F.getName() == "main") {
-		F.setName("user_main");
-		errs() << "main replaced by user_main\n";
-	}
-
-	if (true) {
-		initializeCallbacks( *F.getParent() );
-
-		SmallVector<Instruction *, 8> AllLoadsAndStores;
-		SmallVector<Instruction *, 8> LocalLoadsAndStores;
-		SmallVector<Instruction *, 8> AtomicAccesses;
-
-		std::vector<Instruction *> worklist;
-
-		bool Res = false;
-		const DataLayout &DL = F.getParent()->getDataLayout();
-
-		errs() << "--- " << F.getName() << "---\n";
-
-		for (auto &B : F) {
-			for (auto &I : B) {
-				if ( (&I)->isAtomic() || isAtomicCall(&I) ) {
-					AtomicAccesses.push_back(&I);
-				} else if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
-					LocalLoadsAndStores.push_back(&I);
-				} else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
-					// not implemented yet
-				}
-			}
-
-			chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
-		}
-
-		for (auto Inst : AllLoadsAndStores) {
-//			Res |= instrumentLoadOrStore(Inst, DL);
-//			errs() << "load and store are replaced\n";
-		}
-
-		for (auto Inst : AtomicAccesses) {
-			Res |= instrumentAtomic(Inst, DL);
-		}
-
-		if (F.getName() == "user_main") {
-			// F.dump();
-		}
-
-	}
-
-	return false;
-}
-
-void CDSPass::chooseInstructionsToInstrument(
-	SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
-	const DataLayout &DL) {
-	SmallPtrSet<Value *, 8> WriteTargets;
-	// Iterate from the end.
-	for (Instruction *I : reverse(Local)) {
-		if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
-			Value *Addr = Store->getPointerOperand();
-			if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
-				continue;
-			WriteTargets.insert(Addr);
-		} else {
-			LoadInst *Load = cast<LoadInst>(I);
-			Value *Addr = Load->getPointerOperand();
-			if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
-				continue;
-			if (WriteTargets.count(Addr)) {
-				// We will write to this temp, so no reason to analyze the read.
-				NumOmittedReadsBeforeWrite++;
-				continue;
-			}
-			if (addrPointsToConstantData(Addr)) {
-				// Addr points to some constant data -- it can not race with any writes.
-				continue;
-			}
-		}
-		Value *Addr = isa<StoreInst>(*I)
-			? cast<StoreInst>(I)->getPointerOperand()
-			: cast<LoadInst>(I)->getPointerOperand();
-		if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
-				!PointerMayBeCaptured(Addr, true, true)) {
-			// The variable is addressable but not captured, so it cannot be
-			// referenced from a different thread and participate in a data race
-			// (see llvm/Analysis/CaptureTracking.h for details).
-			NumOmittedNonCaptured++;
-			continue;
-		}
-		All.push_back(I);
-	}
-	Local.clear();
-}
-
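addrPointsToConstantData(), relocated above without behavioural change, is what lets both layouts of the file skip reads that provably cannot race. In source terms (illustration only):

	// A read the pass may omit.
	const int table[4] = {1, 2, 3, 4};   // constant global: no write can race

	int lookup(unsigned i) {
		return table[i & 3u];            // counted as NumOmittedReadsFromConstantGlobals
	}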
-bool CDSPass::instrumentLoadOrStore(Instruction *I,
-									const DataLayout &DL) {
-	IRBuilder<> IRB(I);
-	bool IsWrite = isa<StoreInst>(*I);
-	Value *Addr = IsWrite
-		? cast<StoreInst>(I)->getPointerOperand()
-		: cast<LoadInst>(I)->getPointerOperand();
-
-	// swifterror memory addresses are mem2reg promoted by instruction selection.
-	// As such they cannot have regular uses like an instrumentation function and
-	// it makes no sense to track them as memory.
-	if (Addr->isSwiftError())
-		return false;
-
-	int Idx = getMemoryAccessFuncIndex(Addr, DL);
-
-
-// not supported by CDS yet
-/*	if (IsWrite && isVtableAccess(I)) {
-		LLVM_DEBUG(dbgs() << "  VPTR : " << *I << "\n");
-		Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
-		// StoredValue may be a vector type if we are storing several vptrs at once.
-		// In this case, just take the first element of the vector since this is
-		// enough to find vptr races.
-		if (isa<VectorType>(StoredValue->getType()))
-			StoredValue = IRB.CreateExtractElement(
-					StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
-		if (StoredValue->getType()->isIntegerTy())
-			StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
-		// Call TsanVptrUpdate.
-		IRB.CreateCall(TsanVptrUpdate,
-					{IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
-					IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
-		NumInstrumentedVtableWrites++;
-		return true;
-	}
-
-	if (!IsWrite && isVtableAccess(I)) {
-		IRB.CreateCall(TsanVptrLoad,
-					IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
-		NumInstrumentedVtableReads++;
-		return true;
-	}
-*/
-
-	Value *OnAccessFunc = nullptr;
-	OnAccessFunc = IsWrite ? CDSStore[Idx] : CDSLoad[Idx];
-
-	Type *ArgType = IRB.CreatePointerCast(Addr, Addr->getType())->getType();
-
-	if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
-			ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
-		//errs() << "A load or store of type ";
-		//errs() << *ArgType;
-		//errs() << " is passed in\n";
-		return false;	// if other types of load or stores are passed in
-	}
-	IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
-	if (IsWrite) NumInstrumentedWrites++;
-	else NumInstrumentedReads++;
-	return true;
-}
-
-bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
-	IRBuilder<> IRB(I);
-	// LLVMContext &Ctx = IRB.getContext();
-
-	if (auto *CI = dyn_cast<CallInst>(I)) {
-		return instrumentAtomicCall(CI, DL);
-	}
-
-	Value *position = getPosition(I, IRB);
-
-	if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
-		Value *Addr = LI->getPointerOperand();
-		int Idx=getMemoryAccessFuncIndex(Addr, DL);
-		int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
-		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
-		Value *args[] = {Addr, order, position};
-		Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
-		ReplaceInstWithInst(LI, funcInst);
-	} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
-		Value *Addr = SI->getPointerOperand();
-		int Idx=getMemoryAccessFuncIndex(Addr, DL);
-		int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
-		Value *val = SI->getValueOperand();
-		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
-		Value *args[] = {Addr, val, order, position};
-		Instruction* funcInst=CallInst::Create(CDSAtomicStore[Idx], args);
-		ReplaceInstWithInst(SI, funcInst);
-	} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
-		Value *Addr = RMWI->getPointerOperand();
-		int Idx=getMemoryAccessFuncIndex(Addr, DL);
-		int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
-		Value *val = RMWI->getValOperand();
-		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
-		Value *args[] = {Addr, val, order, position};
-		Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][Idx], args);
-		ReplaceInstWithInst(RMWI, funcInst);
-	} else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
-		IRBuilder<> IRB(CASI);
-
-		Value *Addr = CASI->getPointerOperand();
-		int Idx=getMemoryAccessFuncIndex(Addr, DL);
-
-		const unsigned ByteSize = 1U << Idx;
-		const unsigned BitSize = ByteSize * 8;
-		Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
-		Type *PtrTy = Ty->getPointerTo();
-
-		Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
-		Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
-
-		int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
-		int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
-		Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
-		Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);
-
-		Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
-						CmpOperand, NewOperand,
-						order_succ, order_fail, position};
-
-		CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS_V1[Idx], Args);
-		Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);
-
-		Value *OldVal = funcInst;
-		Type *OrigOldValTy = CASI->getNewValOperand()->getType();
-		if (Ty != OrigOldValTy) {
-			// The value is a pointer, so we need to cast the return value.
-			OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
-		}
-
-		Value *Res =
-			IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
-		Res = IRB.CreateInsertValue(Res, Success, 1);
-
-		I->replaceAllUsesWith(Res);
-		I->eraseFromParent();
-	} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
-		int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
-		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
-		Value *Args[] = {order, position};
-
-		CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
-		ReplaceInstWithInst(FI, funcInst);
-//		errs() << "Thread Fences replaced\n";
-	}
-	return true;
-}
-
 int CDSPass::getMemoryAccessFuncIndex(Value *Addr,
-						const DataLayout &DL) {
-	Type *OrigPtrTy = Addr->getType();
-	Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
-	assert(OrigTy->isSized());
-	uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
-	if (TypeSize != 8 && TypeSize != 16 &&
-		TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
-		NumAccessesWithBadSize++;
-		// Ignore all unusual sizes.
-		return -1;
-	}
-	size_t Idx = countTrailingZeros(TypeSize / 8);
-	assert(Idx < kNumberOfAccessSizes);
-	return Idx;
+						const DataLayout &DL) {
+	Type *OrigPtrTy = Addr->getType();
+	Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
+	assert(OrigTy->isSized());
+	uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
+	if (TypeSize != 8 && TypeSize != 16 &&
+			TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
+		NumAccessesWithBadSize++;
+		// Ignore all unusual sizes.
+		return -1;
+	}
+	size_t Idx = countTrailingZeros(TypeSize / 8);
+	assert(Idx < kNumberOfAccessSizes);
+	return Idx;
 }


@@ -784,8 +769,8 @@ char CDSPass::ID = 0;

 // Automatically enable the pass.
 static void registerCDSPass(const PassManagerBuilder &,
-							legacy::PassManagerBase &PM) {
-	PM.add(new CDSPass());
+							legacy::PassManagerBase &PM) {
+	PM.add(new CDSPass());
 }
 static RegisterStandardPasses
 	RegisterMyPass(PassManagerBuilder::EP_OptimizerLast,
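Registering with RegisterStandardPasses at EP_OptimizerLast hooks CDSPass into the end of the legacy optimizer pipeline whenever the plugin is loaded. For reference, the slot arithmetic of getMemoryAccessFuncIndex(): callbacks are indexed by log2 of the access size in bytes, so the four-slot arrays cover 1/2/4/8-byte accesses. A standalone sketch, not from the patch (__builtin_ctz plays the role of countTrailingZeros):

	#include <cstdio>

	static int accessIndex(unsigned typeSizeInBits) {
		if (typeSizeInBits != 8 && typeSizeInBits != 16 &&
		    typeSizeInBits != 32 && typeSizeInBits != 64 && typeSizeInBits != 128)
			return -1;                            // NumAccessesWithBadSize++ in the pass
		return __builtin_ctz(typeSizeInBits / 8); // 8->0, 16->1, 32->2, 64->3
	}

	int main() {
		std::printf("%d %d %d %d\n", accessIndex(8), accessIndex(16),
		            accessIndex(32), accessIndex(64));   // prints: 0 1 2 3
		return 0;
	}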