X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FCodeGen%2FAtomicExpandPass.cpp;h=045c8076d7e7c4c7288a8906855435485c8392c4;hp=077c52b19a7a3d1907cdd3dcda9eb967ccaf0f51;hb=HEAD;hpb=49c02eaad55b68f58d4abdbc22eb08b92a024437;ds=sidebyside

diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index 077c52b19a7..045c8076d7e 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -15,6 +15,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "TaintRelaxedAtomicsUtils.h"
 #include "llvm/ADT/SetOperations.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
@@ -516,10 +517,25 @@ bool AtomicExpand::expandAtomicOpToLLSC(
   Builder.SetInsertPoint(LoopBB);
   Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
 
+  // XXX-update: For relaxed RMWs (i.e., fetch_* operations), we still need to
+  // taint the load part. However, we only need to taint those whose results are
+  // not immediately used by a conditional branch or a store address.
+  Value* StoreAddr = Addr;
+  auto* LoadedPartInst = dyn_cast<Instruction>(Loaded);
+  assert(LoadedPartInst && "Load part of RMW should be an instruction!");
+  if (MemOpOrder != Acquire && MemOpOrder != AcquireRelease &&
+      MemOpOrder != SequentiallyConsistent) {
+    // Also check whether the result is used immediately. If not, taint the
+    // address of the upcoming store-exclusive.
+    if (NeedExtraConstraints(I)) {
+      StoreAddr = taintRMWStoreAddressWithLoadPart(Builder, Addr, LoadedPartInst);
+    }
+  }
+
   Value *NewVal = PerformOp(Builder, Loaded);
 
   Value *StoreSuccess =
-      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
+      TLI->emitStoreConditional(Builder, NewVal, StoreAddr, MemOpOrder);
   Value *TryAgain = Builder.CreateICmpNE(
       StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
   Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
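
Note: NeedExtraConstraints() and taintRMWStoreAddressWithLoadPart() are not defined in this hunk; they are declared in the newly included TaintRelaxedAtomicsUtils.h, whose body is not shown here. The sketch below is a hypothetical illustration only (not the actual helper), showing one plausible way such an address taint could be built with IRBuilder: fold the load-linked result into the store-exclusive's address through an always-zero offset, so the store carries an artificial address dependency on the load while still writing to the same location. It assumes Loaded has an integer type, as emitLoadLinked produces for an atomic RMW; the function name and structure are assumptions for illustration.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical sketch: return an address equal to Addr whose computation is
// data-dependent on Loaded (the result of the load-linked).
static Value *taintAddressWithLoadedValueSketch(IRBuilder<> &Builder,
                                                Value *Addr, Value *Loaded) {
  const DataLayout &DL =
      Builder.GetInsertBlock()->getParent()->getParent()->getDataLayout();
  // Loaded ^ Loaded is always zero but remains data-dependent on Loaded.
  Value *Zero = Builder.CreateXor(Loaded, Loaded, "taint.zero");
  // Resize the zero to the pointer-width integer so it can act as a byte offset.
  Value *ZeroIdx = Builder.CreateZExtOrTrunc(
      Zero, DL.getIntPtrType(Addr->getType()), "taint.idx");
  // (i8*)Addr + 0: points at the same location, but the address fed to the
  // upcoming store-exclusive now depends on the load-linked result.
  unsigned AS = cast<PointerType>(Addr->getType())->getAddressSpace();
  Value *AddrI8 =
      Builder.CreateBitCast(Addr, Builder.getInt8PtrTy(AS), "taint.addr.i8");
  Value *Tainted = Builder.CreateGEP(AddrI8, ZeroIdx, "taint.addr");
  return Builder.CreateBitCast(Tainted, Addr->getType(), "taint.addr.cast");
}

If the real helper follows this pattern, the tainted pointer returned by taintRMWStoreAddressWithLoadPart(Builder, Addr, LoadedPartInst) is what gets passed to emitStoreConditional in place of Addr. On LL/SC targets an address dependency from the exclusive load to the exclusive store keeps the hardware from reordering the two, which is presumably why only orderings weaker than acquire take this path, and only when NeedExtraConstraints(I) reports that the RMW result is not already used by a conditional branch or a store address.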