LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
+ /// Returns true if this relaxed load has been marked as being followed, in
+ /// its basic block, by an atomic RMW with acq_rel (or seq_cst) ordering
+ /// (see setHasSubsequentAcqlRMW). Const-qualified: a pure accessor must be
+ /// callable through const pointers/references.
+ bool getHasSubsequentAcqlRMW() const {
+ return hasSubsequentAcqlRMW_;
+ }
+
+ /// Records whether a subsequent acq_rel (or stronger) atomic RMW was found
+ /// after this load; written by the AtomicExpand pass when scanning the
+ /// load's basic block.
+ void setHasSubsequentAcqlRMW(bool val) {
+ hasSubsequentAcqlRMW_ = val;
+ }
+
/// isVolatile - Return true if this is a load from a volatile memory
/// location.
///
// Forwarding shim exposing Instruction's setInstructionSubclassData to
// this class.
void setInstructionSubclassData(unsigned short D) {
Instruction::setInstructionSubclassData(D);
}
+
+ // True when this relaxed load is followed in its basic block by an atomic
+ // RMW with acq_rel/seq_cst ordering, which already acts as a barrier.
+ // In-class initializer is required: LoadInst's existing constructors
+ // predate this field and never set it, so without "= false" the getter
+ // would read an indeterminate value (undefined behavior).
+ bool hasSubsequentAcqlRMW_ = false;
};
//===----------------------------------------------------------------------===//
bool isIdempotentRMW(AtomicRMWInst *AI);
bool simplifyIdempotentRMW(AtomicRMWInst *AI);
};
+
+
+ // If 'LI' is a relaxed load that is followed in its basic block by an
+// atomic read-modify-write with acq_rel (or seq_cst) ordering, mark the
+// load: the RMW already serves as a natural barrier, so nothing further
+// needs to be done for the load itself.
+void MarkRelaxedLoadBeforeAcqrelRMW(LoadInst* LI) {
+  auto* BB = LI->getParent();
+  auto BBI = LI->getIterator();
+  for (++BBI; BBI != BB->end(); ++BBI) {
+    // Dereferencing a valid (non-end) BasicBlock iterator never yields
+    // null, so no null check is needed here.
+    Instruction* CurInst = &*BBI;
+    // Non-atomic instructions between the load and the RMW do not affect
+    // the ordering argument; skip over them.
+    if (!CurInst->isAtomic()) {
+      continue;
+    }
+    // Any other kind of atomic instruction ends the search: it is not a
+    // barrier this optimization can rely on.
+    auto* RMW = dyn_cast<AtomicRMWInst>(CurInst);
+    if (!RMW) {
+      return;
+    }
+    if (RMW->getOrdering() == AcquireRelease ||
+        RMW->getOrdering() == SequentiallyConsistent) {
+      LI->setHasSubsequentAcqlRMW(true);
+      return; // Barrier found; scanning further is pointless.
+    }
+    // NOTE(review): an RMW with weaker ordering is skipped and the scan
+    // continues — confirm that an intervening relaxed RMW really preserves
+    // the guarantee a later acq_rel RMW is assumed to provide.
+  }
+}
+
}
// Pass identification: LLVM's pass machinery keys off this variable's
// address, not its value (conventionally initialized to 0).
char AtomicExpand::ID = 0;
<< *LI << '\n');
LI->setOrdering(Acquire);
*/
- MonotonicLoadInsts.push_back(LI);
+// MonotonicLoadInsts.push_back(LI);
+ MarkRelaxedLoadBeforeAcqrelRMW(LI);
}
break;
}
switch (I->getOpcode()) {
case Instruction::Load: {
auto* LI = dyn_cast<LoadInst>(&*I);
- if (LI->getOrdering() == Monotonic) {
+ if (LI->getOrdering() == Monotonic &&
+ !LI->getHasSubsequentAcqlRMW()) {
MonotonicLoadInsts.insert(LI);
}
break;