From: Peizhao Ou
Date: Fri, 13 Apr 2018 17:05:47 +0000 (-0700)
Subject: Adds address dependency for atomic loads (for relaxed load l, if any subsequent relax...
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=e82814b6938222a5e6e56c2f7fc24504c2573d71;p=oota-llvm.git

Adds address dependency tracking for atomic loads: for a relaxed load 'l',
if the address of any subsequent relaxed load depends on 'l', that
dependency already orders the pair, so we don't need to taint 'l'.
---

diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index f4ff3787e63..7674e5cff92 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -221,6 +221,7 @@ FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
 namespace {
 
 bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal);
+bool LoadAddressDependOnValue(LoadInst* LI, Value* DepVal);
 Value* GetUntaintedAddress(Value* CurrentAddress);
 
 // The depth we trace down a variable to look for its dependence set.
@@ -935,6 +936,49 @@ Instruction* findMostRecentDependenceUsage(LoadInst* LI, Instruction* LaterInst,
   return usage_inst;
 }
 
+// XXX-comment: For relaxed load 'LI' and the first upcoming store/conditional
+// branch instruction 'FirstInst', returns false (no extra constraint needed)
+// if some intermediate instruction I (including 'FirstInst') satisfies either:
+// 1. I is a load/store whose address depends on 'LI', or
+// 2. I is a conditional branch whose condition depends on 'LI'.
+// 'LI' and 'FirstInst' may be in different basic blocks, as long as LI's block
+// reaches FirstInst's block through a chain of unconditional branches.
+bool NeedExtraConstraints(LoadInst* LI, Instruction* FirstInst) {
+  if (!FirstInst) {
+    return true;
+  }
+  auto* BB = LI->getParent();
+  auto BBI = LI->getIterator();
+  BBI++;
+  while (true) {
+    auto* I = &*BBI;
+    BBI++;
+    BranchInst *BI = dyn_cast<BranchInst>(I);
+    if (BI && BI->isUnconditional()) {
+      BasicBlock *DestBB = BI->getSuccessor(0);
+      BBI = DestBB->begin();
+      continue;
+    }
+
+    if (I->getOpcode() == Instruction::Store) {
+      return !StoreAddressDependOnValue(dyn_cast<StoreInst>(I), LI);
+    } else if (I->getOpcode() == Instruction::Load) {
+      if (I->isAtomic() &&
+          LoadAddressDependOnValue(dyn_cast<LoadInst>(I), LI)) {
+        // Normal loads can still be reordered by the backend, so we only
+        // rely on atomic loads.
+        return false;
+      }
+    } else if (I->getOpcode() == Instruction::Br) {
+      return !ConditionalBranchDependsOnValue(dyn_cast<BranchInst>(I), LI);
+    }
+    if (I == FirstInst) {
+      return true;
+    }
+  }
+  return true;
+}
+
 // XXX-comment: Returns whether the code has been changed.
 bool AddFakeConditionalBranchAfterMonotonicLoads(
     SmallSet<LoadInst*, 1>& MonotonicLoadInsts, DominatorTree* DT) {
@@ -944,24 +988,10 @@ bool AddFakeConditionalBranchAfterMonotonicLoads(
     MonotonicLoadInsts.erase(LI);
     SmallVector<BasicBlock*, 2> ChainedBB;
     auto* FirstInst = findFirstStoreCondBranchInst(LI, &ChainedBB);
-    if (FirstInst != nullptr) {
-      if (FirstInst->getOpcode() == Instruction::Store) {
-        if (StoreAddressDependOnValue(dyn_cast<StoreInst>(FirstInst), LI)) {
-          continue;
-        }
-      } else if (FirstInst->getOpcode() == Instruction::Br) {
-        if (ConditionalBranchDependsOnValue(dyn_cast<BranchInst>(FirstInst),
-                                            LI)) {
-          continue;
-        }
-      } else {
-        IntrinsicInst* II = dyn_cast<IntrinsicInst>(FirstInst);
-        if (!II || II->getIntrinsicID() != Intrinsic::aarch64_stlxr) {
-          dbgs() << "FirstInst=" << *FirstInst << "\n";
-          assert(false && "findFirstStoreCondBranchInst() should return a "
-                          "store/condition branch instruction");
-        }
-      }
+
+    // First check whether an existing dependency already orders this load.
+    if (FirstInst != nullptr && !NeedExtraConstraints(LI, FirstInst)) {
+      continue;
     }
 
     // We really need to process the relaxed load now.
@@ -1260,6 +1290,10 @@ bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal) {
   return dependenceSetInclusion(SI->getPointerOperand(), DepVal);
 }
 
+bool LoadAddressDependOnValue(LoadInst* LI, Value* DepVal) {
+  return dependenceSetInclusion(LI->getPointerOperand(), DepVal);
+}
+
 bool StoreDependOnValue(StoreInst* SI, Value* Dep) {
   return dependenceSetInclusion(SI, Dep);
 }
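
Illustrative example (not part of the patch; the names 'head' and 'reader'
are hypothetical): a minimal C++ sketch of the source pattern the new
NeedExtraConstraints() check recognizes. Both loads below are
memory_order_relaxed and thus lower to monotonic LLVM loads; the second
load's address depends on the first, so, assuming the dependency survives
to CodeGenPrepare, NeedExtraConstraints() returns false and no fake
conditional branch is inserted for the first load:

    #include <atomic>

    // Hypothetical shared pointer, for illustration only.
    std::atomic<std::atomic<int>*> head;

    int reader() {
      // Relaxed load 'LI': produces the pointer dereferenced below.
      std::atomic<int>* p = head.load(std::memory_order_relaxed);
      // A later relaxed (monotonic) load whose address depends on 'LI'.
      // On architectures such as ARM and POWER the address dependency
      // already orders the two loads, so 'LI' needs no tainting.
      return p->load(std::memory_order_relaxed);
    }

Had the second load used an address independent of 'p' (e.g. a separate
global), NeedExtraConstraints() would return true and the pass would fall
through and taint the relaxed load as before.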