//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// target-specific instructions which implement the same semantics in a way
// which better fits the target backend. This can include the use of either
// (intrinsic-based) load-linked/store-conditional loops, AtomicCmpXchg, or
// type coercions.
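//
// For example, on a target that opts for LL/SC expansion, a hypothetical
//
//   %old = atomicrmw add i32* %ptr, i32 1 seq_cst
//
// is rewritten into a loop around the target's load-linked/store-conditional
// intrinsics (see expandAtomicOpToLLSC below).
//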
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
    const TargetLowering *TLI;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
    LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
    bool tryExpandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
    bool expandAtomicStore(StoreInst *SI);
    bool tryExpandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicOpToLLSC(
        Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
        std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    bool isIdempotentRMW(AtomicRMWInst *AI);
    bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  };
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
    "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
    false, false)
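
// Targets opt in to this expansion from their pass pipelines; typically (an
// illustrative sketch, details vary per target) by calling
// addPass(createAtomicExpandPass(TM)) early in TargetPassConfig::addIRPasses().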
FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}

bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather
  // a list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");
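
    // If the target asks for fence insertion (getInsertFencesForAtomic), we
    // weaken the instruction's ordering to monotonic here and instead bracket
    // it below with explicit fences carrying the original ordering.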
    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      //  - into a load if it is idempotent
      //  - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting Builder insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

/// Get the iX type with the same bitwidth as T.
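/// For example, float yields i32 and double yields i64.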
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
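/// A sketch of the rewrite (%p.cast and %v.int are illustrative names):
///   %v = load atomic float, float* %p seq_cst, align 4
/// becomes:
///   %p.cast = bitcast float* %p to i32*
///   %v.int = load atomic i32, i32* %p.cast seq_cst, align 4
///   %v = bitcast i32 %v.int to float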
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewAddr);
  NewLI->setAlignment(LI->getAlignment());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}

bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(
        LI, LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);
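
  // Compare-exchange a dummy value against itself: whether the value at Addr
  // happens to equal DummyVal (and DummyVal is stored back) or not (and the
  // cmpxchg fails), memory is left unchanged and the instruction yields the
  // current contents.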
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
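/// A sketch of the rewrite (%f.int and %p.cast are illustrative names):
///   store atomic float %f, float* %p seq_cst, align 4
/// becomes:
///   %f.int = bitcast float %f to i32
///   %p.cast = bitcast float* %p to i32*
///   store atomic i32 %f.int, i32* %p.cast seq_cst, align 4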
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlignment());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
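  //
  // A sketch of the rewrite:
  //   store atomic i64 %v, i64* %p seq_cst, align 8
  // becomes (result unused):
  //   atomicrmw xchg i64* %p, i64 %v seq_cst
  // which tryExpandAtomicRMW then lowers as usual.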
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}
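
// A CreateCmpXchgInstFun (see AtomicExpandUtils.h) that implements the
// compare-exchange step of an expanded atomicrmw with a native cmpxchg
// instruction; passed to expandAtomicRMWToCmpXchg below.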
static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
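/// For example, for AtomicRMWInst::Add this emits "%new = add %Loaded, %Inc"
/// and returns %new; Xchg simply returns Inc.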
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(AI, AI->getPointerOperand(), AI->getOrdering(),
                                [&](IRBuilder<> &Builder, Value *Loaded) {
                                  return performAtomicOp(AI->getOperation(),
                                                         Builder, Loaded,
                                                         AI->getValOperand());
                                });
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}

bool AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
    std::function<Value *(IRBuilder<> &, Value *)> PerformOp) {
  BasicBlock *BB = I->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]

  BasicBlock *ExitBB = BB->splitBasicBlock(I->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from I.
  IRBuilder<> Builder(I);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();

  return true;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-ops and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.nostore
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
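
/// An atomicrmw is idempotent if it leaves memory unchanged, e.g.
/// "atomicrmw or i32* %p, i32 0" or "atomicrmw and i32* %p, i32 -1". Such an
/// operation only needs to observe the current value, so
/// simplifyIdempotentRMW can lower it to a fenced atomic load instead.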
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  assert(AI);

  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, MemOpOrder,
                Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}