//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// either (intrinsic-based) load-linked/store-conditional loops or AtomicCmpXchg.
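//
// For example (an illustration, not the literal output; the exact shapes are
// documented above each expand* function below): on a target with no LL/SC
// support and no native nand instruction, the pass rewrites
//
//     %old = atomicrmw nand i32* %p, i32 %v seq_cst
//
// into a cmpxchg-based retry loop (see expandAtomicRMWToCmpXchg).
//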
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"
namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool expandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicRMWToLLSC(AtomicRMWInst *AI);
    bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  };
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
    "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
    false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}

bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
    return false;
  auto TargetLowering = TM->getSubtargetImpl()->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TargetLowering->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TargetLowering->hasLoadLinkedStoreConditional() &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI && TargetLowering->shouldExpandAtomicLoadInIR(LI)) {
      MadeChange |= expandAtomicLoad(LI);
    } else if (SI && TargetLowering->shouldExpandAtomicStoreInIR(SI)) {
      MadeChange |= expandAtomicStore(SI);
    } else if (RMWI && TargetLowering->shouldExpandAtomicRMWInIR(RMWI)) {
      MadeChange |= expandAtomicRMW(RMWI);
    } else if (CASI && TargetLowering->hasLoadLinkedStoreConditional()) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence =
      TM->getSubtargetImpl()->getTargetLowering()->emitLeadingFence(
          Builder, Order, IsStore, IsLoad);

  auto TrailingFence =
      TM->getSubtargetImpl()->getTargetLowering()->emitTrailingFence(
          Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting Builder insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

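// For example, on a target that returns true from getInsertFencesForAtomic(),
// runOnFunction above turns
//     store atomic i32 %v, i32* %p seq_cst, align 4
// into (roughly; each fence is whatever emitLeading/TrailingFence produces,
// and either side may be omitted by the target)
//     fence seq_cst
//     store atomic i32 %v, i32* %p monotonic, align 4
//     fence seq_cst
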
bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
  if (TM->getSubtargetImpl()
          ->getTargetLowering()
          ->hasLoadLinkedStoreConditional())
    return expandAtomicLoadToLL(LI);
  else
    return expandAtomicLoadToCmpXchg(LI);
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

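// Illustrative example (the intrinsic and any pointer casts are whatever the
// target's emitLoadLinked chooses to emit): on ARM, a 64-bit
//     %val = load atomic i64* %ptr seq_cst, align 8
// can become an ldrexd-based load-linked such as
//     %lohi = call { i32, i32 } @llvm.arm.ldrexd(i8* %ptr8)
// reassembled into an i64, since ldrexd is single-copy atomic where a plain
// 64-bit load is not (%ptr8 here is a hypothetical bitcast of %ptr).
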
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

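// For example, a monotonic atomic load
//     %val = load atomic i32* %ptr monotonic, align 4
// becomes (with the null dummy value, so memory is never actually modified)
//     %pair = cmpxchg i32* %ptr, i32 0, i32 0 monotonic monotonic
//     %val = extractvalue { i32, i1 } %pair, 0
// i.e. a compare-exchange that never changes memory but returns the load.
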
bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only return true in
  // shouldExpandAtomicStoreInIR in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return expandAtomicRMW(AI);
}

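// For example, an atomic store too wide for a native store, such as
//     store atomic i64 %val, i64* %ptr release, align 8
// becomes
//     atomicrmw xchg i64* %ptr, i64 %val release
// whose loaded result is simply unused; the xchg is then expanded like any
// other atomicrmw.
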
bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
  if (TM->getSubtargetImpl()
          ->getTargetLowering()
          ->hasLoadLinkedStoreConditional())
    return expandAtomicRMWToLLSC(AI);
  else
    return expandAtomicRMWToCmpXchg(AI);
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

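// For instance, for AtomicRMWInst::Nand this emits
//     %andtmp = and iN %loaded, %inc
//     %new = xor iN %andtmp, -1          ; CreateNot
// and for AtomicRMWInst::Max an "icmp sgt" feeding a select of the larger
// value.
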
bool AtomicExpand::expandAtomicRMWToLLSC(AtomicRMWInst *AI) {
  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
  AtomicOrdering MemOpOrder = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}

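// Concretely (an illustration of the loop above), for
//     %old = atomicrmw add i32* %ptr, i32 %inc seq_cst
// the loop body computes "%new = add i32 %loaded, %inc" between the
// load-linked and the store-conditional, and every use of %old becomes a use
// of %loaded from the final (successful) iteration.
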
bool AtomicExpand::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI) {
  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment; getPrimitiveSizeInBits()
  // returns bits, so divide by 8 to get the byte alignment.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Value *NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Loaded->addIncoming(NewLoaded, LoopBB);

  Value *Success = Builder.CreateExtractValue(Pair, 1, "success");
  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}

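// Worked example: for
//     %old = atomicrmw nand i32* %p, i32 %v acq_rel
// the loop body contains
//     %tmp = and i32 %loaded, %v
//     %new = xor i32 %tmp, -1
//     %pair = cmpxchg i32* %p, i32 %loaded, i32 %new acq_rel acquire
// and retries until the cmpxchg reports success; this is how RMW operations
// with no native instruction are supported on CAS-only targets.
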
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  auto TLI = TM->getSubtargetImpl()->getTargetLowering();
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-ops and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);

  // Make sure later instructions don't get reordered with a fence if necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent uses of the { iN, i1 } cmpxchg result into uses of %loaded and
  // an appropriate PHI.

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);
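
  // For example, an (illustrative) user such as
  //     %ok = extractvalue { i32, i1 } %pair, 1
  //     br i1 %ok, label %then, label %else
  // ends up branching on the %success PHI directly once the pruning loop
  // below rewrites it.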

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();

  return true;
}