From: Eric Christopher
Date: Mon, 26 Jan 2015 19:45:40 +0000 (+0000)
Subject: Cache the lookup of TargetLowering in the atomic expand pass.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=7936ef08bd1c3f6a33869382c8eb950c8508da01;p=oota-llvm.git

Cache the lookup of TargetLowering in the atomic expand pass.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227121 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index 12f6bd77d33..be4e6c24a8c 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -31,10 +31,11 @@ using namespace llvm;
 namespace {
   class AtomicExpand: public FunctionPass {
     const TargetMachine *TM;
+    const TargetLowering *TLI;
   public:
     static char ID; // Pass identification, replacement for typeid
     explicit AtomicExpand(const TargetMachine *TM = nullptr)
-      : FunctionPass(ID), TM(TM) {
+      : FunctionPass(ID), TM(TM), TLI(nullptr) {
       initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
     }
 
@@ -69,7 +70,7 @@ FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
 bool AtomicExpand::runOnFunction(Function &F) {
   if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
     return false;
-  auto TargetLowering = TM->getSubtargetImpl()->getTargetLowering();
+  TLI = TM->getSubtargetImpl()->getTargetLowering();
 
   SmallVector<Instruction *, 1> AtomicInsts;
 
@@ -91,7 +92,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
 
     auto FenceOrdering = Monotonic;
     bool IsStore, IsLoad;
-    if (TargetLowering->getInsertFencesForAtomic()) {
+    if (TLI->getInsertFencesForAtomic()) {
       if (LI && isAtLeastAcquire(LI->getOrdering())) {
         FenceOrdering = LI->getOrdering();
         LI->setOrdering(Monotonic);
@@ -107,9 +108,9 @@ bool AtomicExpand::runOnFunction(Function &F) {
       FenceOrdering = RMWI->getOrdering();
       RMWI->setOrdering(Monotonic);
       IsStore = IsLoad = true;
-    } else if (CASI && !TargetLowering->hasLoadLinkedStoreConditional() &&
-                   (isAtLeastRelease(CASI->getSuccessOrdering()) ||
-                    isAtLeastAcquire(CASI->getSuccessOrdering()))) {
+    } else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
+               (isAtLeastRelease(CASI->getSuccessOrdering()) ||
+                isAtLeastAcquire(CASI->getSuccessOrdering()))) {
       // If a compare and swap is lowered to LL/SC, we can do smarter fence
       // insertion, with a stronger one on the success path than on the
       // failure path. As a result, fence insertion is directly done by
@@ -125,20 +126,19 @@ bool AtomicExpand::runOnFunction(Function &F) {
       }
     }
 
-    if (LI && TargetLowering->shouldExpandAtomicLoadInIR(LI)) {
+    if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
      MadeChange |= expandAtomicLoad(LI);
-    } else if (SI && TargetLowering->shouldExpandAtomicStoreInIR(SI)) {
+    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
       MadeChange |= expandAtomicStore(SI);
     } else if (RMWI) {
       // There are two different ways of expanding RMW instructions:
       // - into a load if it is idempotent
       // - into a Cmpxchg/LL-SC loop otherwise
       // we try them in that order.
-      MadeChange |= (isIdempotentRMW(RMWI) &&
-                    simplifyIdempotentRMW(RMWI)) ||
-                    (TargetLowering->shouldExpandAtomicRMWInIR(RMWI) &&
-                    expandAtomicRMW(RMWI));
-    } else if (CASI && TargetLowering->hasLoadLinkedStoreConditional()) {
+      MadeChange |=
+          (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) ||
+          (TLI->shouldExpandAtomicRMWInIR(RMWI) && expandAtomicRMW(RMWI));
+    } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
       MadeChange |= expandAtomicCmpXchg(CASI);
     }
   }
@@ -149,13 +149,9 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                          bool IsStore, bool IsLoad) {
   IRBuilder<> Builder(I);
 
-  auto LeadingFence =
-      TM->getSubtargetImpl()->getTargetLowering()->emitLeadingFence(
-          Builder, Order, IsStore, IsLoad);
+  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);
 
-  auto TrailingFence =
-      TM->getSubtargetImpl()->getTargetLowering()->emitTrailingFence(
-          Builder, Order, IsStore, IsLoad);
+  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
   // The trailing fence is emitted before the instruction instead of after
   // because there is no easy way of setting Builder insertion point after
   // an instruction. So we must erase it from the BB, and insert it back
@@ -171,16 +167,13 @@ bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
 }
 
 bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
-  if (TM->getSubtargetImpl()
-          ->getTargetLowering()
-          ->hasLoadLinkedStoreConditional())
+  if (TLI->hasLoadLinkedStoreConditional())
     return expandAtomicLoadToLL(LI);
   else
     return expandAtomicLoadToCmpXchg(LI);
 }
 
 bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   IRBuilder<> Builder(LI);
 
   // On some architectures, load-linked instructions are atomic for larger
@@ -231,9 +224,7 @@ bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
 }
 
 bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
-  if (TM->getSubtargetImpl()
-          ->getTargetLowering()
-          ->hasLoadLinkedStoreConditional())
+  if (TLI->hasLoadLinkedStoreConditional())
     return expandAtomicRMWToLLSC(AI);
   else
     return expandAtomicRMWToCmpXchg(AI);
@@ -277,7 +268,6 @@ static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
 }
 
 bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   AtomicOrdering MemOpOrder = AI->getOrdering();
   Value *Addr = AI->getPointerOperand();
   BasicBlock *BB = AI->getParent();
@@ -397,7 +387,6 @@ bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
 }
 
 bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();
   Value *Addr = CI->getPointerOperand();
@@ -551,13 +540,10 @@ bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
 }
 
 bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
-  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
-
   if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
     if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
       expandAtomicLoad(ResultingLoad);
     return true;
   }
-
   return false;
 }
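
For readers who want the pattern in isolation: the patch looks TargetLowering up once per runOnFunction call and keeps it in a pass member, instead of re-deriving it through TM->getSubtargetImpl()->getTargetLowering() at every use site. Below is a minimal standalone sketch of that caching idiom under simplified assumptions; the names Machine, Lowering, ExpandPass, and getLowering are illustrative stand-ins, not the LLVM classes or API.

#include <cstdio>

// Stand-in for TargetLowering: the object whose lookup we want to cache.
struct Lowering {
  bool hasLoadLinkedStoreConditional() const { return true; }
};

// Stand-in for TargetMachine; getLowering() plays the role of the
// getSubtargetImpl()->getTargetLowering() chain in the real pass.
struct Machine {
  Lowering L;
  const Lowering *getLowering() const { return &L; }
};

class ExpandPass {
  const Machine *TM;
  const Lowering *TLI; // cached once per run instead of once per query

public:
  explicit ExpandPass(const Machine *TM = nullptr) : TM(TM), TLI(nullptr) {}

  bool run() {
    if (!TM)
      return false;
    TLI = TM->getLowering(); // single lookup for the whole run
    return expandOne();
  }

private:
  bool expandOne() {
    // Every later query goes through the cached pointer.
    return TLI->hasLoadLinkedStoreConditional();
  }
};

int main() {
  Machine M;
  ExpandPass P(&M);
  std::printf("expanded: %d\n", P.run());
}

The member is reset at the start of each run rather than in the constructor because, as in the pass above, the lowering object is only meaningful once a machine is attached; callers between runs never read the stale pointer.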