//===-- AtomicExpandPass.cpp - Expand atomic instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// target-specific instructions which implement the same semantics in a way
// which better fits the target backend. This can include the use of either
// (intrinsic-based) load-linked/store-conditional loops, AtomicCmpXchg, or
// type coercions.
//
//===----------------------------------------------------------------------===//
#include "TaintRelaxedAtomicsUtils.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

#define DEBUG_TYPE "atomic-expand"
namespace {

class AtomicExpand: public FunctionPass {
  const TargetMachine *TM;
  const TargetLowering *TLI;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                             bool IsStore, bool IsLoad);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  bool expandAtomicOpToLLSC(
      Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
      std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *AI);
  bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  // If 'LI' is a relaxed load, and it is immediately followed by an
  // atomic read-modify-write that has acq_rel ordering, we don't have to do
  // anything since the RMW serves as a natural barrier.
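  // For example, a sketch of the IR pattern this method looks for:
  //   %v = load atomic i32, i32* %p monotonic, align 4
  //   %old = atomicrmw add i32* %p, i32 1 acq_rel
  // Here the acq_rel RMW already orders the relaxed load.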
  void MarkRelaxedLoadBeforeAcqrelRMW(LoadInst* LI) {
    auto* BB = LI->getParent();
    auto BBI = LI->getIterator();
    for (BBI++; BBI != BB->end(); BBI++) {
      Instruction* CurInst = &*BBI;
      if (!CurInst->isAtomic()) {
        break;
      }
      auto* RMW = dyn_cast<AtomicRMWInst>(CurInst);
      if (!RMW) {
        break;
      }
      if (RMW->getOrdering() == AcquireRelease ||
          RMW->getOrdering() == SequentiallyConsistent) {
        LI->setHasSubsequentAcqlRMW(true);
      }
      break;
    }
  }
};

} // end anonymous namespace
char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
                   "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
                   false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}
bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;
  SmallVector<LoadInst*, 1> MonotonicLoadInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    // XXX-update: For relaxed operations, upgrade them to acquire. This
    // includes relaxed loads, relaxed atomic RMW & relaxed atomic compare
    // exchange.
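    // For example, a cmpxchg whose success ordering is monotonic is upgraded
    // to acquire, and a release one to acq_rel, so the load part of each such
    // operation is always at least an acquire.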
    switch (I->getOpcode()) {
      case Instruction::AtomicCmpXchg: {
        // XXX-comment: AtomicCmpXchg in AArch64 will be translated to a
        // conditional branch that contains the value of the load anyway, so
        // we don't need to do anything.
        auto* CmpXchg = dyn_cast<AtomicCmpXchgInst>(&*I);
        auto SuccOrdering = CmpXchg->getSuccessOrdering();
        if (SuccOrdering == Monotonic) {
          CmpXchg->setSuccessOrdering(Acquire);
        } else if (SuccOrdering == Release) {
          CmpXchg->setSuccessOrdering(AcquireRelease);
        }
        break;
      }
      case Instruction::AtomicRMW: {
        // XXX-comment: Similar to AtomicCmpXchg. These instructions in
        // AArch64 will be translated to a loop whose condition depends on the
        // store status, which further depends on the load value.
        auto* RMW = dyn_cast<AtomicRMWInst>(&*I);
        if (RMW->getOrdering() == Monotonic) {
          RMW->setOrdering(Acquire);
        }
        break;
      }
      case Instruction::Load: {
        auto* LI = dyn_cast<LoadInst>(&*I);
        if (LI->getOrdering() == Monotonic) {
          DEBUG(dbgs() << "Transforming relaxed loads to acquire loads: "
                       << *LI << '\n');
          LI->setOrdering(Acquire);
          // MonotonicLoadInsts.push_back(LI);
          MarkRelaxedLoadBeforeAcqrelRMW(LI);
        }
        break;
      }
      default: {
        break;
      }
    }

    if (I->isAtomic()) {
      AtomicInsts.push_back(&*I);
    }
  }
  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        // AddFakeConditionalBranch(
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }
    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // we try them in that order.
      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

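/// Surround I with the target's leading and trailing fences for ordering
/// Order; the instruction's own ordering has already been weakened by the
/// caller where appropriate.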
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting Builder insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewAddr);
  NewLI->setAlignment(LI->getAlignment());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}

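/// Expand an atomic load according to the expansion kind requested by the
/// target: leave it alone, emit an LL form, an LL/SC form, or a cmpxchg form.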
bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(
        LI, LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}

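/// Expand an atomic load into a bare load-linked, for targets where a
/// load-linked instruction is atomic at widths a normal load is not.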
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

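/// Expand an atomic load into a cmpxchg of a dummy value against itself: the
/// exchange never changes memory, but its extracted "loaded" result gives an
/// atomic read of the location.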
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlignment());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}

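/// Emit a single cmpxchg attempt and return, via the out-parameters, the
/// success flag and the value actually loaded. This is the callback that
/// expandAtomicRMWToCmpXchg uses to build each iteration of its retry loop.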
static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Value* Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

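/// Expand an atomicrmw according to the expansion kind requested by the
/// target: leave it alone, emit an LL/SC loop, or emit a cmpxchg loop.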
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(AI, AI->getPointerOperand(), AI->getOrdering(),
                                [&](IRBuilder<> &Builder, Value *Loaded) {
                                  return performAtomicOp(AI->getOperation(),
                                                         Builder, Loaded,
                                                         AI->getValOperand());
                                });
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}

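/// Emit the LL/SC retry loop for an atomic operation: load-linked, apply
/// PerformOp to the loaded value, then store-conditional and retry on failure.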
bool AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
    std::function<Value *(IRBuilder<> &, Value *)> PerformOp) {
  BasicBlock *BB = I->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(I->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from I.
  IRBuilder<> Builder(I);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  // XXX-update: For relaxed RMWs (i.e., fetch_* operations), we still need to
  // taint the load part. However, we only need to taint those whose results
  // are not immediately used by a conditional branch or a store address.
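  // (NeedExtraConstraints() and taintRMWStoreAddressWithLoadPart() are
  // assumed to be the helpers declared in TaintRelaxedAtomicsUtils.h,
  // included above.)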
  Value* StoreAddr = Addr;
  auto* LoadedPartInst = dyn_cast<Instruction>(Loaded);
  assert(LoadedPartInst && "Load part of RMW should be an instruction!");
  if (MemOpOrder != Acquire && MemOpOrder != AcquireRelease &&
      MemOpOrder != SequentiallyConsistent) {
    // Also check whether the result is used immediately. If so, taint the
    // address of the upcoming store-exclusive.
    if (NeedExtraConstraints(I)) {
      StoreAddr =
          taintRMWStoreAddressWithLoadPart(Builder, Addr, LoadedPartInst);
    }
  }

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, StoreAddr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();

  return true;
}

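/// Expand a cmpxchg instruction into an explicit load-linked/store-conditional
/// control-flow graph, inserting fences as requested by the target.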
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.nostore
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}

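/// Return true if the operation provably leaves memory unchanged (e.g. adding
/// zero, or AND-ing with all-ones), so it can be treated as a plain load.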
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

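/// Ask the target to lower an idempotent RMW into a fenced load, and expand
/// the resulting load if necessary. Returns true if the RMW was replaced.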
bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

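/// Expand an atomicrmw into a loop: load the current value, compute the new
/// value in registers, and cmpxchg it in, retrying until the cmpxchg succeeds.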
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  assert(AI);

  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, MemOpOrder,
                Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}