//===-- AtomicExpandPass.cpp - Expand atomic instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or
// AtomicCmpXchg.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
    const TargetLowering *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    bool tryExpandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool tryExpandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicOpToLLSC(
        Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
        std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    bool isIdempotentRMW(AtomicRMWInst *AI);
    bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  };
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
    "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
    false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}
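
// A sketch of how a target typically schedules this pass, assuming a
// hypothetical MyPassConfig subclass of TargetPassConfig:
//
//     void MyPassConfig::addIRPasses() {
//       addPass(createAtomicExpandPass(TM));
//       TargetPassConfig::addIRPasses();
//     }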

bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI) {
      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
      MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.
      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}
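
// As an example of the fence bracketing performed above: on a target whose
// getInsertFencesForAtomic() returns true, an acquire load such as
//
//     %val = load atomic i32, i32* %addr acquire, align 4
//
// is downgraded to monotonic and bracketed, conceptually becoming
//
//     %val = load atomic i32, i32* %addr monotonic, align 4
//     fence acquire
//
// (illustrative only; the actual instructions are whatever the target's
// emitLeadingFence/emitTrailingFence hooks produce).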

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting the Builder's insertion point
  // after an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(
        LI, LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
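
// Note that emitAtomicCmpXchgNoStoreLLBalance() above gives the target a
// chance to balance out the load-linked when no store-conditional follows;
// on ARM, for instance, that means clearing the exclusive monitor.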

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
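
// The expansion above implements the load as a compare-exchange that can
// never change memory. Illustratively,
//
//     %val = load atomic i64, i64* %addr seq_cst, align 8
//
// becomes
//
//     %pair = cmpxchg i64* %addr, i64 0, i64 0 seq_cst seq_cst
//     %val = extractvalue { i64, i1 } %pair, 0
//
// Whether the dummy compare succeeds or fails, memory is left untouched and
// the current value is returned.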

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them with an
  // atomic swap, which can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}
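
// Illustratively, the expansion above turns
//
//     store atomic i64 %v, i64* %addr release, align 8
//
// into
//
//     atomicrmw xchg i64* %addr, i64 %v release
//
// whose result is simply ignored; the swap is then lowered like any other
// RMW operation.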

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
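
// For example, performAtomicOp(AtomicRMWInst::Max, ...) emits the usual
// compare+select idiom on the register values:
//
//     %0 = icmp sgt iN %loaded, %inc
//     %new = select i1 %0, iN %loaded, iN %inc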

bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(AI, AI->getPointerOperand(), AI->getOrdering(),
                                [&](IRBuilder<> &Builder, Value *Loaded) {
                                  return performAtomicOp(AI->getOperation(),
                                                         Builder, Loaded,
                                                         AI->getValOperand());
                                });
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}

bool AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
    std::function<Value *(IRBuilder<> &, Value *)> PerformOp) {
  BasicBlock *BB = I->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(I->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from I.
  IRBuilder<> Builder(I);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();

  return true;
}
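
// On LL/SC targets the emitLoadLinked/emitStoreConditional calls above become
// target intrinsics; on ARM, for example, they produce calls to
// @llvm.arm.ldrex and @llvm.arm.strex, which instruction selection then turns
// into the familiar ldrex/strex retry loop.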

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-ops and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  // fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.nostore
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Set up the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now that we're no longer iterating through
  // them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();

  return true;
}
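
// An atomicrmw is idempotent when it can never change the value in memory,
// e.g.
//
//     atomicrmw add i32* %p, i32 0 seq_cst
//     atomicrmw and i32* %p, i32 -1 seq_cst
//
// Such operations only observe memory, so they can be rewritten as a suitably
// fenced atomic load of the same location.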

bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Xor:
      return C->isZero();
    case AtomicRMWInst::And:
      return C->isMinusOne();
    // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
    default:
      return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  assert(AI);

  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // %init_loaded = load atomic iN* %addr
  // br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove the branch
  // entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, MemOpOrder,
                /*ret*/ Success, /*ret*/ NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}