//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// target-specific instructions which implement the same semantics in a way
// which better fits the target backend. This can include the use of
// (intrinsic-based) load-linked/store-conditional loops, AtomicCmpXchg, or
// type coercions.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "atomic-expand"
namespace {
class AtomicExpand: public FunctionPass {
  const TargetMachine *TM;
  const TargetLowering *TLI;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                             bool IsStore, bool IsLoad);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  bool expandAtomicOpToLLSC(
      Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
      std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *AI);
  bool simplifyIdempotentRMW(AtomicRMWInst *AI);
};
} // end anonymous namespace
char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
    "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
    false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}
bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal);
Value* GetUntaintedAddress(Value* CurrentAddress);

// The depth we trace down a variable to look for its dependence set.
const unsigned kDependenceDepth = 4;
// Recursively looks for variables that 'Val' depends on at the given depth
// 'Depth', and adds them in 'DepSet'. If 'InsertOnlyLeafNodes' is true, only
// inserts the leaf node values; otherwise, all visited nodes are included in
// 'DepSet'. Note that constants will be ignored.
template <typename SetType>
void recursivelyFindDependence(SetType* DepSet, Value* Val,
                               bool InsertOnlyLeafNodes = false,
                               unsigned Depth = kDependenceDepth) {
  if (Val == nullptr) {
    return;
  }
  if (!InsertOnlyLeafNodes && !isa<Constant>(Val)) {
  // Cannot go deeper. Insert the leaf nodes.
  if (InsertOnlyLeafNodes && !isa<Constant>(Val)) {

  // Go one step further to explore the dependence of the operands.
  Instruction* I = nullptr;
  if ((I = dyn_cast<Instruction>(Val))) {
    if (isa<LoadInst>(I)) {
      // A load is considered the leaf load of the dependence tree. Done.
    } else if (I->isBinaryOp()) {
      BinaryOperator* I = dyn_cast<BinaryOperator>(Val);
      Value *Op0 = I->getOperand(0), *Op1 = I->getOperand(1);
      recursivelyFindDependence(DepSet, Op0, Depth - 1);
      recursivelyFindDependence(DepSet, Op1, Depth - 1);
    } else if (I->isCast()) {
      Value* Op0 = I->getOperand(0);
      recursivelyFindDependence(DepSet, Op0, Depth - 1);
    } else if (I->getOpcode() == Instruction::Select) {
      Value* Op0 = I->getOperand(0);
      Value* Op1 = I->getOperand(1);
      Value* Op2 = I->getOperand(2);
      recursivelyFindDependence(DepSet, Op0, Depth - 1);
      recursivelyFindDependence(DepSet, Op1, Depth - 1);
      recursivelyFindDependence(DepSet, Op2, Depth - 1);
    } else if (I->getOpcode() == Instruction::GetElementPtr) {
      for (unsigned i = 0; i < I->getNumOperands(); i++) {
        recursivelyFindDependence(DepSet, I->getOperand(i), Depth - 1);
      }
    } else if (I->getOpcode() == Instruction::Store) {
      auto* SI = dyn_cast<StoreInst>(Val);
      recursivelyFindDependence(DepSet, SI->getPointerOperand(), Depth - 1);
      recursivelyFindDependence(DepSet, SI->getValueOperand(), Depth - 1);
    } else {
      Value* Op0 = nullptr;
      Value* Op1 = nullptr;
      switch (I->getOpcode()) {
        case Instruction::ICmp:
        case Instruction::FCmp: {
          Op0 = I->getOperand(0);
          Op1 = I->getOperand(1);
          recursivelyFindDependence(DepSet, Op0, Depth - 1);
          recursivelyFindDependence(DepSet, Op1, Depth - 1);
          // Be conservative. Add it and be done with it.
  } else if (isa<Constant>(Val)) {
    // Not interested in constant values. Done.
    // Be conservative. Add it and be done with it.
// Helper function to create a Cast instruction.
Value* createCast(IRBuilder<true, NoFolder>& Builder, Value* DepVal,
                  Type* TargetIntegerType) {
  Instruction::CastOps CastOp = Instruction::BitCast;
  switch (DepVal->getType()->getTypeID()) {
    case Type::IntegerTyID: {
      CastOp = Instruction::SExt;
      break;
    }
    case Type::FloatTyID:
    case Type::DoubleTyID: {
      CastOp = Instruction::FPToSI;
      break;
    }
    case Type::PointerTyID: {
      CastOp = Instruction::PtrToInt;
      break;
    }
  }
  return Builder.CreateCast(CastOp, DepVal, TargetIntegerType);
}
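
// A minimal sketch (hypothetical values) of what createCast produces on a
// target whose pointers are 64 bits wide:
//   i1  %cond  -->  sext i1 %cond to i64        (IntegerTyID)
//   double %d  -->  fptosi double %d to i64     (FloatTyID / DoubleTyID)
//   i32* %p    -->  ptrtoint i32* %p to i64     (PointerTyID)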
// Given a value, if it's a tainted address, this function returns the
// instruction that ORs the "dependence value" with the "original address".
// Otherwise, returns nullptr. This instruction is the first OR instruction
// where one of its operands is an AND instruction with an operand being 0.
//
// E.g., it returns '%4 = or i32 %3, %2' given 'CurrentAddress' is '%5'.
// %0 = load i32, i32* @y, align 4, !tbaa !1
// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
// %1 = sext i1 %cmp to i32
// %2 = ptrtoint i32* @x to i32
// %3 = and i32 %1, 0
// %4 = or i32 %3, %2
// %5 = inttoptr i32 %4 to i32*
// store i32 1, i32* %5, align 4
Instruction* getOrAddress(Value* CurrentAddress) {
  // Is it a cast from integer to pointer type?
  Instruction* OrAddress = nullptr;
  Instruction* AndDep = nullptr;
  Instruction* CastToInt = nullptr;
  Value* ActualAddress = nullptr;
  Constant* ZeroConst = nullptr;

  const Instruction* CastToPtr = dyn_cast<Instruction>(CurrentAddress);
  if (CastToPtr && CastToPtr->getOpcode() == Instruction::IntToPtr) {
    // Is it an OR instruction: %1 = or %and, %actualAddress.
    if ((OrAddress = dyn_cast<Instruction>(CastToPtr->getOperand(0))) &&
        OrAddress->getOpcode() == Instruction::Or) {
      // The first operand should be an AND instruction.
      AndDep = dyn_cast<Instruction>(OrAddress->getOperand(0));
      if (AndDep && AndDep->getOpcode() == Instruction::And) {
        // Also make sure the first operand of the "AND" is 0, or the "AND" is
        // marked explicitly by "NoInstCombine".
        if ((ZeroConst = dyn_cast<Constant>(AndDep->getOperand(1))) &&
            ZeroConst->isNullValue()) {
          return OrAddress;
        }
      }
    }
  }

  // Looks like it's not been tainted.
  return nullptr;
}
// Given a value, if it's a tainted address, this function returns the
// instruction that taints the "dependence value". Otherwise, returns nullptr.
// This instruction is the last AND instruction where one of its operands is 0.
// E.g., it returns '%3' given 'CurrentAddress' is '%5'.
// %0 = load i32, i32* @y, align 4, !tbaa !1
// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
// %1 = sext i1 %cmp to i32
// %2 = ptrtoint i32* @x to i32
// %3 = and i32 %1, 0
// %4 = or i32 %3, %2
// %5 = inttoptr i32 %4 to i32*
// store i32 1, i32* %5, align 4
Instruction* getAndDependence(Value* CurrentAddress) {
  // If 'CurrentAddress' is tainted, get the OR instruction.
  auto* OrAddress = getOrAddress(CurrentAddress);
  if (OrAddress == nullptr) {
    return nullptr;
  }

  // No need to check the operands.
  auto* AndDepInst = dyn_cast<Instruction>(OrAddress->getOperand(0));
  return AndDepInst;
}
// Given a value, if it's a tainted address, this function returns
// the "dependence value", which is the first operand in the AND instruction.
// E.g., it returns '%1' given 'CurrentAddress' is '%5'.
// %0 = load i32, i32* @y, align 4, !tbaa !1
// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
// %1 = sext i1 %cmp to i32
// %2 = ptrtoint i32* @x to i32
// %3 = and i32 %1, 0
// %4 = or i32 %3, %2
// %5 = inttoptr i32 %4 to i32*
// store i32 1, i32* %5, align 4
Value* getDependence(Value* CurrentAddress) {
  auto* AndInst = getAndDependence(CurrentAddress);
  if (AndInst == nullptr) {
    return nullptr;
  }
  return AndInst->getOperand(0);
}
// Given an address that has been tainted, returns the only condition it
// depends on, if any; otherwise, returns nullptr.
Value* getConditionDependence(Value* Address) {
  auto* Dep = getDependence(Address);
  if (Dep == nullptr) {
    // 'Address' has not been dependence-tainted.
    return nullptr;
  }

  Value* Operand = Dep;
  auto* Inst = dyn_cast<Instruction>(Operand);
  if (Inst == nullptr) {
    // Non-instruction type does not have condition dependence.
  if (Inst->getOpcode() == Instruction::ICmp) {
  if (Inst->getNumOperands() != 1) {
  Operand = Inst->getOperand(0);
// Conservatively decides whether the dependence set of 'Val1' includes the
// dependence set of 'Val2'. If 'ExpandSecondValue' is false, we do not expand
// 'Val2' and use that single value as its dependence set.
// If it returns true, it means the dependence set of 'Val1' includes that of
// 'Val2'; otherwise, it only means we cannot conclusively decide it.
bool dependenceSetInclusion(Value* Val1, Value* Val2,
                            int Val1ExpandLevel = 2 * kDependenceDepth,
                            int Val2ExpandLevel = kDependenceDepth) {
  typedef SmallSet<Value*, 8> IncludingSet;
  typedef SmallSet<Value*, 4> IncludedSet;

  IncludingSet DepSet1;
  IncludedSet DepSet2;
  // Look for more depths for the including set.
  recursivelyFindDependence(&DepSet1, Val1, false /*Insert all visited nodes*/,
                            Val1ExpandLevel);
  recursivelyFindDependence(&DepSet2, Val2, true /*Only insert leaf nodes*/,
                            Val2ExpandLevel);

  auto set_inclusion = [](IncludingSet FullSet, IncludedSet Subset) {
    for (auto* Dep : Subset) {
      if (0 == FullSet.count(Dep)) {
        return false;
      }
    }
    return true;
  };

  bool inclusion = set_inclusion(DepSet1, DepSet2);
  DEBUG(dbgs() << "[dependenceSetInclusion]: " << inclusion << "\n");
  DEBUG(dbgs() << "Including set for: " << *Val1 << "\n");
  DEBUG(for (const auto* Dep : DepSet1) { dbgs() << "\t\t" << *Dep << "\n"; });
  DEBUG(dbgs() << "Included set for: " << *Val2 << "\n");
  DEBUG(for (const auto* Dep : DepSet2) { dbgs() << "\t\t" << *Dep << "\n"; });

  return inclusion;
}
// Recursively iterates through the operands spawned from 'DepVal'. If there
// exists a single value that 'DepVal' only depends on, we call that value the
// root dependence of 'DepVal' and return it. Otherwise, return 'DepVal'.
Value* getRootDependence(Value* DepVal) {
  SmallSet<Value*, 8> DepSet;
  for (unsigned depth = kDependenceDepth; depth > 0; --depth) {
    recursivelyFindDependence(&DepSet, DepVal, true /*Only insert leaf nodes*/,
                              depth);
    if (DepSet.size() == 1) {
      return *DepSet.begin();
    }
    DepSet.clear();
  }
  return DepVal;
}
// This function actually taints 'DepVal' to the address of 'SI'. If the
// address of 'SI' already depends on whatever 'DepVal' depends on, this
// function doesn't do anything and returns false. Otherwise, returns true.
//
// This effect forces the store and any stores that come later to depend on
// 'DepVal'. For example, we have a condition "cond", and a store instruction
// "s: STORE addr, val". If we want "s" (and any later store) to depend on
// "cond", we do the following:
// %conv = sext i1 %cond to i32
// %addrVal = ptrtoint i32* %addr to i32
// %andCond = and i32 conv, 0;
// %orAddr = or i32 %andCond, %addrVal;
// %NewAddr = inttoptr i32 %orAddr to i32*;
//
// This is a more concrete example:
// %0 = load i32, i32* @y, align 4, !tbaa !1
// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
// %1 = sext i1 %cmp to i32
// %2 = ptrtoint i32* @x to i32
// %3 = and i32 %1, 0
// %4 = or i32 %3, %2
// %5 = inttoptr i32 %4 to i32*
// store i32 1, i32* %5, align 4
bool taintStoreAddress(StoreInst* SI, Value* DepVal,
                       const char* calling_func = __builtin_FUNCTION()) {
  DEBUG(dbgs() << "Called from " << calling_func << '\n');
  IRBuilder<true, NoFolder> Builder(SI);
  BasicBlock* BB = SI->getParent();
  Value* Address = SI->getPointerOperand();
  Type* TargetIntegerType =
      IntegerType::get(Address->getContext(),
                       BB->getModule()->getDataLayout().getPointerSizeInBits());

  // Does SI's address already depend on whatever 'DepVal' depends on?
  if (StoreAddressDependOnValue(SI, DepVal)) {
    return false;
  }

  // Figure out if there's a root variable 'DepVal' depends on. For example, we
  // can extract "getelementptr inbounds %struct, %struct* %0, i64 0, i32 123"
  // to be "%struct* %0" since all other operands are constant.
  DepVal = getRootDependence(DepVal);

  // Is this already a dependence-tainted store?
  Value* OldDep = getDependence(Address);
  if (OldDep) {
    // The address of 'SI' has already been tainted. Just need to absorb the
    // DepVal to the existing dependence in the address of SI.
    Instruction* AndDep = getAndDependence(Address);
    IRBuilder<true, NoFolder> Builder(AndDep);
    Value* NewDep = nullptr;
    if (DepVal->getType() == AndDep->getType()) {
      NewDep = Builder.CreateAnd(OldDep, DepVal);
    } else {
      NewDep = Builder.CreateAnd(
          OldDep, createCast(Builder, DepVal, TargetIntegerType));
    }

    auto* NewDepInst = dyn_cast<Instruction>(NewDep);

    // Use the new AND instruction as the dependence
    AndDep->setOperand(0, NewDep);
    return true;
  }

  // SI's address has not been tainted. Now taint it with 'DepVal'.
  Value* CastDepToInt = createCast(Builder, DepVal, TargetIntegerType);
  Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType);
  Value* AndDepVal =
      Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0));
  auto AndInst = dyn_cast<Instruction>(AndDepVal);
  // XXX-comment: The original IR InstCombiner would change our and instruction
  // to a select and then the back end optimize the condition out. We attach a
  // flag to instructions and set it here to inform the InstCombiner not to
  // touch this 'and' instruction at all.
  Value* OrAddr = Builder.CreateOr(AndDepVal, PtrToIntCast);
  Value* NewAddr = Builder.CreateIntToPtr(OrAddr, Address->getType());

  DEBUG(dbgs() << "[taintStoreAddress]\n"
               << "Original store: " << *SI << '\n');
  SI->setOperand(1, NewAddr);

  DEBUG(dbgs() << "\tTargetIntegerType: " << *TargetIntegerType << '\n'
               << "\tCast dependence value to integer: " << *CastDepToInt
               << '\n'
               << "\tCast address to integer: " << *PtrToIntCast << '\n'
               << "\tAnd dependence value: " << *AndDepVal << '\n'
               << "\tOr address: " << *OrAddr << '\n'
               << "\tCast or instruction to address: " << *NewAddr << "\n\n");

  return true;
}
// Looks for the previous store in the if block --- 'BrBB', which makes the
// speculative store 'StoreToHoist' safe.
Value* getSpeculativeStoreInPrevBB(StoreInst* StoreToHoist, BasicBlock* BrBB) {
  assert(StoreToHoist && "StoreToHoist must be a real store");

  Value* StorePtr = StoreToHoist->getPointerOperand();

  // Look for a store to the same pointer in BrBB.
  for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), RE = BrBB->rend();
       RI != RE; ++RI) {
    Instruction* CurI = &*RI;

    StoreInst* SI = dyn_cast<StoreInst>(CurI);
    // Found the previous store; make sure it stores to the same location.
    // XXX-update: If the previous store's original untainted address is the
    // same as 'StorePtr', we are also good to hoist the store.
    if (SI && (SI->getPointerOperand() == StorePtr ||
               GetUntaintedAddress(SI->getPointerOperand()) == StorePtr)) {
      // Found the previous store, return its value operand.
      return SI->getValueOperand();
    }
  }

      "We should not reach here since this store is safe to speculate");
// XXX-comment: Returns true if it changes the code, false otherwise (the
// branch condition already depends on 'DepVal').
bool taintConditionalBranch(BranchInst* BI, Value* DepVal) {
  assert(BI->isConditional());
  auto* Cond = BI->getOperand(0);
  if (dependenceSetInclusion(Cond, DepVal)) {
    // The dependence/ordering is self-evident.
    return false;
  }

  IRBuilder<true, NoFolder> Builder(BI);
  auto* AndDep =
      Builder.CreateAnd(DepVal, ConstantInt::get(DepVal->getType(), 0));
  auto* TruncAndDep =
      Builder.CreateTrunc(AndDep, IntegerType::get(DepVal->getContext(), 1));
  auto* OrCond = Builder.CreateOr(TruncAndDep, Cond);
  BI->setOperand(0, OrCond);
  return true;
}
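
// For illustration, given a branch on %cond and a dependence value %dep
// (shown here as an i32), the rewrite above produces roughly:
//   %andDep   = and i32 %dep, 0
//   %truncDep = trunc i32 %andDep to i1
//   %orCond   = or i1 %truncDep, %cond
//   br i1 %orCond, ...
// The OR leaves the branch condition semantically unchanged while making it
// data-dependent on %dep.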
// XXX-update: For a relaxed load 'LI', find the first immediately following
// atomic store or the first conditional branch. Returns 'LI' itself if it can
// be left as is; returns nullptr if there are no such immediately following
// store/branch instructions, in which case we can only enforce the load with
// 'acquire'.
Instruction* findFirstStoreCondBranchInst(LoadInst* LI) {
  // In some situations, relaxed loads can be left as is:
  // 1. The relaxed load is used to calculate the address of the immediately
  //    following store;
  // 2. The relaxed load is used as a condition in the immediately following
  //    conditional branch, and there are no stores in between. This is
  //    actually quite common. E.g.,
  //      int r1 = x.load(relaxed);
  //      y.store(1, relaxed);

  auto* BB = LI->getParent();
  auto BE = BB->end();
  auto BBI = BasicBlock::iterator(LI);
  for (; BBI != BE; BBI++) {
    auto* Inst = dyn_cast<Instruction>(&*BBI);
    if (Inst == nullptr) {
      continue;
    }
    if (Inst->getOpcode() == Instruction::Store) {
      return Inst;
    } else if (Inst->getOpcode() == Instruction::Br) {
      auto* BrInst = dyn_cast<BranchInst>(Inst);
      if (BrInst->isConditional()) {
        return Inst;
      } else {
        // Reinitialize iterators with the destination of the unconditional
        // branch.
        BB = BrInst->getSuccessor(0);
void taintMonotonicLoads(const SmallVector<LoadInst*, 1>& MonotonicLoadInsts) {
  for (auto* LI : MonotonicLoadInsts) {
    auto* FirstInst = findFirstStoreCondBranchInst(LI);
    if (FirstInst == nullptr) {
      // No need to worry about the relaxed load.
      continue;
    }
    if (FirstInst == LI) {
      // We don't seem to be able to taint a following store/conditional branch
      // instruction. Simply make it acquire.
      LI->setOrdering(Acquire);
      continue;
    }
    // Taint 'FirstInst', which could be a store or a conditional branch
    // instruction.
    if (FirstInst->getOpcode() == Instruction::Store) {
      taintStoreAddress(dyn_cast<StoreInst>(FirstInst), LI);
    } else if (FirstInst->getOpcode() == Instruction::Br) {
      taintConditionalBranch(dyn_cast<BranchInst>(FirstInst), LI);
    } else {
      assert(false && "findFirstStoreCondBranchInst() should return a "
                      "store/conditional branch instruction");
    }
  }
}
/**** Implementations of public methods for dependence tainting ****/
Value* GetUntaintedAddress(Value* CurrentAddress) {
  auto* OrAddress = getOrAddress(CurrentAddress);
  if (OrAddress == nullptr) {
    // Is it tainted by a select instruction?
    auto* Inst = dyn_cast<Instruction>(CurrentAddress);
    if (nullptr != Inst && Inst->getOpcode() == Instruction::Select) {
      // A selection instruction.
      if (Inst->getOperand(1) == Inst->getOperand(2)) {
        return Inst->getOperand(1);
      }
    }

    return CurrentAddress;
  }

  Value* ActualAddress = nullptr;

  auto* CastToInt = dyn_cast<Instruction>(OrAddress->getOperand(1));
  if (CastToInt && CastToInt->getOpcode() == Instruction::PtrToInt) {
    return CastToInt->getOperand(0);
  } else {
    // This should be a PtrToInt constant expression.
    ConstantExpr* PtrToIntExpr =
        dyn_cast<ConstantExpr>(OrAddress->getOperand(1));
    if (PtrToIntExpr && PtrToIntExpr->getOpcode() == Instruction::PtrToInt) {
      return PtrToIntExpr->getOperand(0);
    }
  }

  // Looks like it's not been dependence-tainted. Returns itself.
  return CurrentAddress;
}
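
// For the running example used throughout this file, GetUntaintedAddress(%5)
// walks %5 = inttoptr(%4), %4 = or(%3, %2), %2 = ptrtoint(@x) and returns @x,
// i.e. the original, untainted address.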
MemoryLocation GetUntaintedMemoryLocation(StoreInst* SI) {
  AAMDNodes AATags;
  SI->getAAMetadata(AATags);
  const auto& DL = SI->getModule()->getDataLayout();
  const auto* OriginalAddr = GetUntaintedAddress(SI->getPointerOperand());
  DEBUG(if (OriginalAddr != SI->getPointerOperand()) {
    dbgs() << "[GetUntaintedMemoryLocation]\n"
           << "Storing address: " << *SI->getPointerOperand()
           << "\nUntainted address: " << *OriginalAddr << "\n";
  });
  return MemoryLocation(OriginalAddr,
                        DL.getTypeStoreSize(SI->getValueOperand()->getType()),
                        AATags);
}
bool TaintDependenceToStore(StoreInst* SI, Value* DepVal) {
  if (dependenceSetInclusion(SI, DepVal)) {
    return false;
  }

  bool tainted = taintStoreAddress(SI, DepVal);
  return tainted;
}

bool TaintDependenceToStoreAddress(StoreInst* SI, Value* DepVal) {
  if (dependenceSetInclusion(SI->getPointerOperand(), DepVal)) {
    return false;
  }

  bool tainted = taintStoreAddress(SI, DepVal);
  return tainted;
}
bool CompressTaintedStore(BasicBlock* BB) {
  // This function looks for windows of adjacent stores in 'BB' that satisfy
  // the following condition (and then do optimization):
  // *Addr(d1) = v1, d1 is a condition and is the only dependence the store's
  //               address depends on && Dep(v1) includes Dep(d1);
  // *Addr(d2) = v2, d2 is a condition and is the only dependence the store's
  //               address depends on && Dep(v2) includes Dep(d2) &&
  //               Dep(d2) includes Dep(d1);
  // ...
  // *Addr(dN) = vN, dN is a condition and is the only dependence the store's
  //               address depends on && Dep(dN) includes Dep(d"N-1").
  //
  // As a result, Dep(dN) includes [Dep(d1) V ... V Dep(d"N-1")], so we can
  // safely transform the above to the following. In between these stores, we
  // can omit untainted stores to the same address 'Addr' since they internally
  // have dependence on the previous stores on the same address.
  for (auto BI = BB->begin(), BE = BB->end(); BI != BE; BI++) {
    // Look for the first store in such a window of adjacent stores.
    auto* FirstSI = dyn_cast<StoreInst>(&*BI);
    if (!FirstSI) {
      continue;
    }

    // The first store in the window must be tainted.
    auto* UntaintedAddress = GetUntaintedAddress(FirstSI->getPointerOperand());
    if (UntaintedAddress == FirstSI->getPointerOperand()) {
      continue;
    }

    // The first store's address must directly depend on and only depend on a
    // condition.
    auto* FirstSIDepCond = getConditionDependence(FirstSI->getPointerOperand());
    if (nullptr == FirstSIDepCond) {
      continue;
    }

    // Dep(first store's storing value) includes Dep(tainted dependence).
    if (!dependenceSetInclusion(FirstSI->getValueOperand(), FirstSIDepCond)) {
      continue;
    }

    // Look for subsequent stores to the same address that satisfy the
    // condition of "compressing the dependence".
    SmallVector<StoreInst*, 8> AdjacentStores;
    AdjacentStores.push_back(FirstSI);
    auto BII = BasicBlock::iterator(FirstSI);
    for (BII++; BII != BE; BII++) {
      auto* CurrSI = dyn_cast<StoreInst>(&*BII);
      if (!CurrSI) {
        if (BII->mayHaveSideEffects()) {
          // Be conservative. Instructions with side effects are similar to a
          // store.
          break;
        }
        continue;
      }

      auto* OrigAddress = GetUntaintedAddress(CurrSI->getPointerOperand());
      auto* CurrSIDepCond = getConditionDependence(CurrSI->getPointerOperand());
      // All other stores must satisfy either:
      // A. 'CurrSI' is an untainted store to the same address, or
      // B. the combination of the following 5 subconditions:
      // 2. Untainted address is the same as the group's address;
      // 3. The address is tainted with a sole value which is a condition;
      // 4. The storing value depends on the condition in 3.
      // 5. The condition in 3 depends on the previous store's dependence
      //    condition.

      // Condition A. Should ignore this store directly.
      if (OrigAddress == CurrSI->getPointerOperand() &&
          OrigAddress == UntaintedAddress) {
        continue;
      }
      // Check condition B.
      Value* Cond = nullptr;
      if (OrigAddress == CurrSI->getPointerOperand() ||
          OrigAddress != UntaintedAddress || CurrSIDepCond == nullptr ||
          !dependenceSetInclusion(CurrSI->getValueOperand(), CurrSIDepCond)) {
        // Check condition 1, 2, 3 & 4.
        break;
      }

      // Check condition 5.
      StoreInst* PrevSI = AdjacentStores[AdjacentStores.size() - 1];
      auto* PrevSIDepCond = getConditionDependence(PrevSI->getPointerOperand());
      assert(PrevSIDepCond &&
             "Store in the group must already depend on a condition");
      if (!dependenceSetInclusion(CurrSIDepCond, PrevSIDepCond)) {
        break;
      }

      AdjacentStores.push_back(CurrSI);
    }

    if (AdjacentStores.size() == 1) {
      // The outer loop should keep looking from the next store.
      continue;
    }

    // Now we have such a group of tainted stores to the same address.
    DEBUG(dbgs() << "[CompressTaintedStore]\n");
    DEBUG(dbgs() << "Original BB\n");
    DEBUG(dbgs() << *BB << '\n');
    auto* LastSI = AdjacentStores[AdjacentStores.size() - 1];
    for (int i = 0; i < AdjacentStores.size() - 1; ++i) {
      auto* SI = AdjacentStores[i];

      // Use the original address for stores before the last one.
      SI->setOperand(1, UntaintedAddress);

      DEBUG(dbgs() << "Store address has been reversed: " << *SI << '\n';);
    }

    // XXX-comment: Try to make the last store use fewer registers.
    // If LastSI's storing value is a select based on the condition with which
    // its address is tainted, transform the tainted address to a select
    // instruction, as follows:
    // r1 = Select Cond ? A : B
    // ...
    // r1 = Select Cond ? A : B
    // r2 = Select Cond ? Addr : Addr
    //
    // The idea is that both Select instructions depend on the same condition,
    // so hopefully the backend can generate two cmov instructions for them
    // (and this saves the number of registers needed).
    auto* LastSIDep = getConditionDependence(LastSI->getPointerOperand());
    auto* LastSIValue = dyn_cast<Instruction>(LastSI->getValueOperand());
    if (LastSIValue && LastSIValue->getOpcode() == Instruction::Select &&
        LastSIValue->getOperand(0) == LastSIDep) {
      // XXX-comment: Maybe it's better for us to just leave it as an and/or
      // dependence pattern.

      IRBuilder<true, NoFolder> Builder(LastSI);
      auto* Address =
          Builder.CreateSelect(LastSIDep, UntaintedAddress, UntaintedAddress);
      LastSI->setOperand(1, Address);
      DEBUG(dbgs() << "The last store becomes: " << *LastSI << "\n\n";);
bool PassDependenceToStore(Value* OldAddress, StoreInst* NewStore) {
  Value* OldDep = getDependence(OldAddress);
  // Return false when there's no dependence to pass from the OldAddress.
  if (!OldDep) {
    return false;
  }

  // No need to pass the dependence to NewStore's address if it already depends
  // on whatever 'OldAddress' depends on.
  if (StoreAddressDependOnValue(NewStore, OldDep)) {
    return false;
  }
  return taintStoreAddress(NewStore, OldAddress);
}

SmallSet<Value*, 8> FindDependence(Value* Val) {
  SmallSet<Value*, 8> DepSet;
  recursivelyFindDependence(&DepSet, Val, true /*Only insert leaf nodes*/);
  return DepSet;
}

bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal) {
  return dependenceSetInclusion(SI->getPointerOperand(), DepVal);
}

bool StoreDependOnValue(StoreInst* SI, Value* Dep) {
  return dependenceSetInclusion(SI, Dep);
}
bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;
  SmallVector<LoadInst*, 1> MonotonicLoadInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather
  // a list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    // XXX-update: For relaxed loads, change them to acquire. This includes
    // relaxed loads, relaxed atomic RMW & relaxed atomic compare exchange.
    switch (I->getOpcode()) {
      case Instruction::AtomicCmpXchg: {
        // XXX-comment: AtomicCmpXchg in AArch64 will be translated to a
        // conditional branch that contains the value of the load anyway, so
        // we don't need to do anything.
        auto* CmpXchg = dyn_cast<AtomicCmpXchgInst>(&*I);
        auto SuccOrdering = CmpXchg->getSuccessOrdering();
        if (SuccOrdering == Monotonic) {
          CmpXchg->setSuccessOrdering(Acquire);
        } else if (SuccOrdering == Release) {
          CmpXchg->setSuccessOrdering(AcquireRelease);
        }
        break;
      }
      case Instruction::AtomicRMW: {
        // XXX-comment: Similar to AtomicCmpXchg. These instructions in
        // AArch64 will be translated to a loop whose condition depends on the
        // store status, which further depends on the load value.
        auto* RMW = dyn_cast<AtomicRMWInst>(&*I);
        if (RMW->getOrdering() == Monotonic) {
          RMW->setOrdering(Acquire);
        }
        break;
      }
      case Instruction::Load: {
        auto* LI = dyn_cast<LoadInst>(&*I);
        if (LI->getOrdering() == Monotonic) {
          DEBUG(dbgs() << "Transforming relaxed loads to acquire loads: "
                       << *LI << '\n');
          LI->setOrdering(Acquire);
          MonotonicLoadInsts.push_back(LI);

    AtomicInsts.push_back(&*I);
  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    auto FenceOrdering = Monotonic;
    bool IsStore, IsLoad;
    if (TLI->getInsertFencesForAtomic()) {
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // we try them in that order.
      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
      MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }

  taintMonotonicLoads(MonotonicLoadInsts);

  return MadeChange;
}
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting Builder insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}
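
// Sketch (target-dependent; the fence kinds are whatever the TLI hooks emit):
// together with the ordering downgrade done by the caller in runOnFunction,
// bracketing a seq_cst store on a target that requests explicit fences turns
//   store atomic i32 %v, i32* %p seq_cst
// into
//   fence <leading>
//   store atomic i32 %v, i32* %p monotonic
//   fence <trailing>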
/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}
/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewAddr);
  NewLI->setAlignment(LI->getAlignment());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
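
// A sketch of the conversion above (assuming a 32-bit float):
//   load atomic float, float* %p acquire
// becomes
//   %1 = bitcast float* %p to i32*
//   %2 = load atomic i32, i32* %1 acquire
//   %3 = bitcast i32 %2 to float
// and all uses of the original load are rewired to %3.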
bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(
        LI, LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
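
// Sketch of the expansion above: "load atomic i32, i32* %p acquire" becomes
//   %pair   = cmpxchg i32* %p, i32 0, i32 0 acquire acquire
//   %loaded = extractvalue { i32, i1 } %pair, 0
// i.e. a cmpxchg of 0 against 0 whose "old value" result serves as the loaded
// value; memory is left unchanged whether the compare succeeds or not.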
/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlignment());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}
bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}
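
// E.g. (a sketch; the exact types come from the original store), when the
// target requests expansion, "store atomic i64 %v, i64* %p release" is
// rewritten to "atomicrmw xchg i64* %p, i64 %v release", which then goes
// through the usual RMW expansion paths below.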
static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Value* Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}
/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
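
// For instance, the Max/Min cases above materialize the usual compare+select
// idiom rather than a dedicated instruction:
//   %cmp = icmp sgt iN %loaded, %incr
//   %new = select i1 %cmp, iN %loaded, iN %incr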
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(AI, AI->getPointerOperand(), AI->getOrdering(),
                                [&](IRBuilder<> &Builder, Value *Loaded) {
                                  return performAtomicOp(AI->getOperation(),
                                                         Builder, Loaded,
                                                         AI->getValOperand());
                                });
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}
bool AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
    std::function<Value *(IRBuilder<> &, Value *)> PerformOp) {
  BasicBlock *BB = I->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(I->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from I.
  IRBuilder<> Builder(I);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();

  return true;
}
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If getInsertFencesForAtomic() returns true, then the target does not want
  // to deal with memory orders, and emitLeading/TrailingFence should take care
  // of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.nostore
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success, label %loop/%cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                        /*IsLoad=*/true);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : LoopBB);
  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                         /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate PHI.

  // Setup the builder so we can create any PHIs we need.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}
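
// Examples of operations the check above treats as idempotent (hypothetical
// IR):
//   atomicrmw add i32* %p, i32 0 seq_cst     ; x + 0  == x
//   atomicrmw or  i32* %p, i32 0 seq_cst     ; x | 0  == x
//   atomicrmw and i32* %p, i32 -1 seq_cst    ; x & ~0 == x
// simplifyIdempotentRMW below may then lower such an operation to a fenced
// load via the target hook.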
bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, MemOpOrder,
                Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}