//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or AtomicCmpXchg.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"
namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
    const TargetLowering *TLI;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool tryExpandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    bool isIdempotentRMW(AtomicRMWInst *AI);
    bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  };
} // end anonymous namespace
char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
    "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
    false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}
bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }
  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->hasLoadLinkedStoreConditional() &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }
    if (LI && TLI->shouldExpandAtomicLoadInIR(LI)) {
      MadeChange |= expandAtomicLoad(LI);
    } else if (SI && TLI->shouldExpandAtomicStoreInIR(SI)) {
      MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.
      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI && TLI->hasLoadLinkedStoreConditional()) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}
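// A minimal sketch of the bracketing above, assuming a target that returns
// true from getInsertFencesForAtomic(): a load such as
//   %v = load atomic i32, i32* %p seq_cst, align 4
// is downgraded to monotonic and surrounded by whatever fences
// emitLeadingFence/emitTrailingFence decide to emit (either may be null), so
// on such a target the result could look like:
//   fence seq_cst
//   %v = load atomic i32, i32* %p monotonic, align 4
//   fence seq_cst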
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting the Builder insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}
bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
  if (TLI->hasLoadLinkedStoreConditional())
    return expandAtomicLoadToLL(LI);
  else
    return expandAtomicLoadToCmpXchg(LI);
}
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
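// For illustration: with the expansion above,
//   %v = load atomic i32, i32* %p monotonic, align 4
// becomes roughly
//   %pair = cmpxchg i32* %p, i32 0, i32 0 monotonic monotonic
//   %v = extractvalue { i32, i1 } %pair, 0
// i.e. a compare-exchange of 0 against 0, which never changes the stored
// value but still returns the current contents of %p atomically.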
bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them with an
  // atomic swap, which can be implemented, for example, as ldrex/strex on ARM
  // or lock cmpxchg8b/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
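  // For example, an oversized
  //   store atomic i64 %v, i64* %p seq_cst, align 8
  // is rewritten here into
  //   %old = atomicrmw xchg i64* %p, i64 %v seq_cst
  // (with %old unused), which tryExpandAtomicRMW then lowers like any other
  // atomicrmw.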
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}
static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}
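// For illustration, for a monotonic MemOpOrder the helper above emits:
//   %pair = cmpxchg iN* %addr, iN %loaded, iN %new monotonic monotonic
//   %success = extractvalue { iN, i1 } %pair, 1
//   %newloaded = extractvalue { iN, i1 } %pair, 0
// (the failure ordering is the strongest one compatible with MemOpOrder).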
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicRMWExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicRMWExpansionKind::LLSC: {
    assert(TLI->hasLoadLinkedStoreConditional() &&
           "TargetLowering requested we expand AtomicRMW instruction into "
           "load-linked/store-conditional combos, but such instructions aren't "
           "supported");

    return expandAtomicRMWToLLSC(AI);
  }
  case TargetLoweringBase::AtomicRMWExpansionKind::CmpXChg: {
    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
  }
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}
/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
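// For illustration: since no IR instruction computes a maximum directly,
// performAtomicOp(AtomicRMWInst::Max, ...) emits a compare-and-select pair:
//   %0 = icmp sgt iN %loaded, %inc
//   %new = select i1 %0, iN %loaded, iN %inc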
bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
  AtomicOrdering MemOpOrder = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());
  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-ops and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);
  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);
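  // Note the weak/strong distinction just above: a weak cmpxchg is allowed to
  // fail spuriously, so a failed store-conditional branches straight to
  // %cmpxchg.failure rather than looping back to %cmpxchg.start.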
  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Set up the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now that we're no longer iterating through
  // them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has happened,
    // so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();

  return true;
}
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}
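// For illustration: both "atomicrmw or i32* %p, i32 0 seq_cst" and
// "atomicrmw and i32* %p, i32 -1 seq_cst" leave memory unchanged, so they
// qualify as idempotent here and may be lowered to a fenced atomic load by
// simplifyIdempotentRMW below.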
bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    if (TLI->shouldExpandAtomicLoadInIR(ResultingLoad))
      expandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  assert(AI);

  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment; setAlignment takes bytes,
  // not bits.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, MemOpOrder,
                /*ret*/ Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}