Reverts more unnecessary changes
author     Peizhao Ou <peizhaoo@uci.edu>
           Fri, 6 Apr 2018 00:12:07 +0000 (17:12 -0700)
committer  Peizhao Ou <peizhaoo@uci.edu>
           Fri, 6 Apr 2018 00:12:07 +0000 (17:12 -0700)
lib/CodeGen/AtomicExpandPass.cpp

index 077c52b19a7a3d1907cdd3dcda9eb967ccaf0f51..6152055a5bce8b8523534392ad1850a2444fad1b 100644 (file)
@@ -116,51 +116,17 @@ bool AtomicExpand::runOnFunction(Function &F) {
   TLI = TM->getSubtargetImpl(F)->getTargetLowering();
 
   SmallVector<Instruction *, 1> AtomicInsts;
-  SmallVector<LoadInst*, 1> MonotonicLoadInsts;
 
   // Changing control-flow while iterating through it is a bad idea, so gather a
   // list of all atomic instructions before we start.
   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
-    // XXX-update: For relaxed loads, change them to acquire. This includes
-    // relaxed loads, relaxed atomic RMW & relaxed atomic compare exchange.
+    // XXX-update: For relaxed loads, mark whether the relaxed load is
+    // immediately before an AcquireRelease RMW operation.
     if (I->isAtomic()) {
       switch (I->getOpcode()) {
-        case Instruction::AtomicCmpXchg: {
-          // XXX-comment: AtomicCmpXchg in AArch64 will be translated to a
-          // conditional branch that contains the value of the load anyway, so
-          // we don't need to do anything.
-          /*
-          auto* CmpXchg = dyn_cast<AtomicCmpXchgInst>(&*I);
-          auto SuccOrdering = CmpXchg->getSuccessOrdering();
-          if (SuccOrdering == Monotonic) {
-            CmpXchg->setSuccessOrdering(Acquire);
-          } else if (SuccOrdering == Release) {
-            CmpXchg->setSuccessOrdering(AcquireRelease);
-          }
-          */
-          break;
-        }
-        case Instruction::AtomicRMW: {
-          // XXX-comment: Similar to AtomicCmpXchg. These instructions in
-          // AArch64 will be translated to a loop whose condition depends on the
-          // store status, which further depends on the load value.
-          /*
-          auto* RMW = dyn_cast<AtomicRMWInst>(&*I);
-          if (RMW->getOrdering() == Monotonic) {
-            RMW->setOrdering(Acquire);
-          }
-          */
-          break;
-        }
         case Instruction::Load: {
           auto* LI = dyn_cast<LoadInst>(&*I);
           if (LI->getOrdering() == Monotonic) {
-            /*
-            DEBUG(dbgs() << "Transforming relaxed loads to acquire loads: "
-                         << *LI << '\n');
-            LI->setOrdering(Acquire);
-            */
-//            MonotonicLoadInsts.push_back(LI);
             MarkRelaxedLoadBeforeAcqrelRMW(LI);
           }
           break;
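
The retained path above only marks a monotonic (relaxed) load that sits directly in front of an acquire-release read-modify-write; the deleted branches that would have upgraded relaxed orderings on cmpxchg and RMW instructions are gone. For reference, the source-level pattern such a marked load corresponds to looks roughly like the following C++11 sketch; the variable and function names are illustrative assumptions, not taken from the patch.

    #include <atomic>

    std::atomic<int> flag{0};
    std::atomic<int> counter{0};

    int load_then_rmw() {
      // Relaxed load: lowered to a LoadInst with Monotonic ordering, which is
      // what the loop above tests with LI->getOrdering() == Monotonic.
      int f = flag.load(std::memory_order_relaxed);

      // Acquire-release RMW immediately after it: an AtomicRMWInst with
      // AcquireRelease ordering. Per the comment in the hunk,
      // MarkRelaxedLoadBeforeAcqrelRMW(LI) records that the load is in this
      // position rather than changing its ordering.
      counter.fetch_add(f, std::memory_order_acq_rel);
      return f;
    }
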
@@ -187,7 +153,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
     if (TLI->getInsertFencesForAtomic()) {
       if (LI && isAtLeastAcquire(LI->getOrdering())) {
         FenceOrdering = LI->getOrdering();
-//        AddFakeConditionalBranch(
+        LI->setOrdering(Monotonic);
         IsStore = false;
         IsLoad = true;
       } else if (SI && isAtLeastRelease(SI->getOrdering())) {
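
In the hunk above, on targets that request explicit fences around atomics (TLI->getInsertFencesForAtomic()), the load's original acquire-or-stronger ordering is saved in FenceOrdering and the load itself is now rewritten to Monotonic, so that the fence inserted later in this function (not shown in the hunk) carries the acquire semantics. This leans on the usual equivalence that a relaxed load followed by an acquire fence is at least as strong as a plain acquire load; a small C++ illustration of that equivalence (not code from the patch) is:

    #include <atomic>

    std::atomic<int> data{0};

    int acquire_load_direct() {
      // Single acquire load.
      return data.load(std::memory_order_acquire);
    }

    int acquire_load_split() {
      // Relaxed (monotonic) load followed by an acquire fence: at least as
      // strong as the direct acquire load above. This is the shape the pass
      // produces once the load is downgraded and the fence is inserted.
      int v = data.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);
      return v;
    }
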
@@ -255,7 +221,6 @@ bool AtomicExpand::runOnFunction(Function &F) {
       MadeChange |= expandAtomicCmpXchg(CASI);
     }
   }
-
   return MadeChange;
 }