Add an atomic lowering pass
author: Peter Collingbourne <peter@pcc.me.uk>
Tue, 3 Aug 2010 16:19:16 +0000 (16:19 +0000)
committer: Peter Collingbourne <peter@pcc.me.uk>
Tue, 3 Aug 2010 16:19:16 +0000 (16:19 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@110113 91177308-0d34-0410-b5e6-96231b3b80d8

docs/Passes.html
include/llvm/LinkAllPasses.h
include/llvm/Transforms/Scalar.h
lib/Transforms/Scalar/CMakeLists.txt
lib/Transforms/Scalar/LowerAtomic.cpp [new file with mode: 0644]
test/Transforms/LowerAtomic/atomic-load.ll [new file with mode: 0644]
test/Transforms/LowerAtomic/atomic-swap.ll [new file with mode: 0644]
test/Transforms/LowerAtomic/barrier.ll [new file with mode: 0644]
test/Transforms/LowerAtomic/dg.exp [new file with mode: 0644]

index 12a936a..cafa99c 100644 (file)
@@ -167,6 +167,7 @@ perl -e '$/ = undef; for (split(/\n/, <>)) { s:^ *///? ?::; print "  <p>\n" if !
 <tr><td><a href="#loop-unroll">-loop-unroll</a></td><td>Unroll loops</td></tr>
 <tr><td><a href="#loop-unswitch">-loop-unswitch</a></td><td>Unswitch loops</td></tr>
 <tr><td><a href="#loopsimplify">-loopsimplify</a></td><td>Canonicalize natural loops</td></tr>
+<tr><td><a href="#loweratomic">-loweratomic</a></td><td>Lower atomic intrinsics</td></tr>
 <tr><td><a href="#lowerinvoke">-lowerinvoke</a></td><td>Lower invoke and unwind, for unwindless code generators</td></tr>
 <tr><td><a href="#lowersetjmp">-lowersetjmp</a></td><td>Lower Set Jump</td></tr>
 <tr><td><a href="#lowerswitch">-lowerswitch</a></td><td>Lower SwitchInst's to branches</td></tr>
@@ -1547,6 +1548,24 @@ if (X &lt; 3) {</pre>
   </p>
 </div>
 
+<!-------------------------------------------------------------------------- -->
+<div class="doc_subsection">
+  <a name="loweratomic">-loweratomic: Lower atomic intrinsics</a>
+</div>
+<div class="doc_text">
+  <p>
+  This pass lowers atomic intrinsics to non-atomic form for use in a known
+  non-preemptible environment.
+  </p>
+
+  <p>
+  The pass does not verify that the environment is non-preemptible (in
+  general this would require knowledge of the entire call graph of the
+  program including any libraries which may not be available in bitcode form);
+  it simply lowers every atomic intrinsic.
+  </p>
+</div>
+
 <!-------------------------------------------------------------------------- -->
 <div class="doc_subsection">
   <a name="lowerinvoke">-lowerinvoke: Lower invoke and unwind, for unwindless code generators</a>
index 4068772..805db4e 100644 (file)
@@ -148,6 +148,7 @@ namespace {
       (void) llvm::createABCDPass();
       (void) llvm::createLintPass();
       (void) llvm::createSinkingPass();
+      (void) llvm::createLowerAtomicPass();
 
       (void)new llvm::IntervalPartition();
       (void)new llvm::FindUsedTypes();
index 0d338b5..4d1d863 100644 (file)
@@ -338,6 +338,12 @@ FunctionPass *createABCDPass();
 //
 FunctionPass *createSinkingPass();
 
+//===----------------------------------------------------------------------===//
+//
+// LowerAtomic - Lower atomic intrinsics to non-atomic form
+//
+Pass *createLowerAtomicPass();
+
 } // End llvm namespace
 
 #endif
index 1a3b10c..c8028c2 100644 (file)
@@ -17,6 +17,7 @@ add_llvm_library(LLVMScalarOpts
   LoopStrengthReduce.cpp
   LoopUnrollPass.cpp
   LoopUnswitch.cpp
+  LowerAtomic.cpp
   MemCpyOptimizer.cpp
   Reassociate.cpp
   Reg2Mem.cpp
diff --git a/lib/Transforms/Scalar/LowerAtomic.cpp b/lib/Transforms/Scalar/LowerAtomic.cpp
new file mode 100644 (file)
index 0000000..11f5257
--- /dev/null
@@ -0,0 +1,160 @@
+//===- LowerAtomic.cpp - Lower atomic intrinsics --------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers atomic intrinsics to non-atomic form for use in a known
+// non-preemptible environment.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "loweratomic"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/Function.h"
+#include "llvm/Instruction.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/IRBuilder.h"
+
+using namespace llvm;
+
+namespace {
+
+// Lower a call to an atomic intrinsic to an equivalent non-atomic
+// instruction sequence.  Returns true (after erasing the call) if CI was a
+// recognized atomic intrinsic, false if the call was left untouched.
+bool LowerAtomicIntrinsic(CallInst *CI) {
+  // New instructions are inserted immediately before the call being lowered.
+  IRBuilder<> Builder(CI->getParent(), CI);
+
+  Function *Callee = CI->getCalledFunction();
+  if (!Callee)
+    return false;  // Indirect call: cannot be an intrinsic.
+
+  unsigned IID = Callee->getIntrinsicID();
+  switch (IID) {
+  case Intrinsic::memory_barrier:
+    // In the non-preemptible environment this pass assumes, a barrier has
+    // no effect; the call is simply deleted below.
+    break;
+
+  case Intrinsic::atomic_load_add:
+  case Intrinsic::atomic_load_sub:
+  case Intrinsic::atomic_load_and:
+  case Intrinsic::atomic_load_nand:
+  case Intrinsic::atomic_load_or:
+  case Intrinsic::atomic_load_xor:
+  case Intrinsic::atomic_load_max:
+  case Intrinsic::atomic_load_min:
+  case Intrinsic::atomic_load_umax:
+  case Intrinsic::atomic_load_umin: {
+    // Read-modify-write family: lower to plain load, the scalar operation,
+    // and a store.  The intrinsic's result is the pre-modification value.
+    Value *Ptr = CI->getArgOperand(0);
+    Value *Delta = CI->getArgOperand(1);
+
+    LoadInst *Orig = Builder.CreateLoad(Ptr);
+    Value *Res;
+    // Inner switch selects the specific operation; the outer switch has
+    // already established that IID is one of the cases below.
+    switch (IID) {
+      case Intrinsic::atomic_load_add:
+        Res = Builder.CreateAdd(Orig, Delta);
+        break;
+      case Intrinsic::atomic_load_sub:
+        Res = Builder.CreateSub(Orig, Delta);
+        break;
+      case Intrinsic::atomic_load_and:
+        Res = Builder.CreateAnd(Orig, Delta);
+        break;
+      case Intrinsic::atomic_load_nand:
+        // nand == not(and): there is no single IR instruction for it.
+        Res = Builder.CreateNot(Builder.CreateAnd(Orig, Delta));
+        break;
+      case Intrinsic::atomic_load_or:
+        Res = Builder.CreateOr(Orig, Delta);
+        break;
+      case Intrinsic::atomic_load_xor:
+        Res = Builder.CreateXor(Orig, Delta);
+        break;
+      case Intrinsic::atomic_load_max:
+        // Signed max via compare + select.
+        Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
+                                   Delta,
+                                   Orig);
+        break;
+      case Intrinsic::atomic_load_min:
+        // Signed min via compare + select.
+        Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
+                                   Orig,
+                                   Delta);
+        break;
+      case Intrinsic::atomic_load_umax:
+        // Unsigned max via compare + select.
+        Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
+                                   Delta,
+                                   Orig);
+        break;
+      case Intrinsic::atomic_load_umin:
+        // Unsigned min via compare + select.
+        Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
+                                   Orig,
+                                   Delta);
+        break;
+      default: assert(0 && "Unrecognized atomic modify operation");
+    }
+    Builder.CreateStore(Res, Ptr);
+
+    // Users of the intrinsic see the value that was in memory beforehand.
+    CI->replaceAllUsesWith(Orig);
+    break;
+  }
+
+  case Intrinsic::atomic_swap: {
+    // Swap: load the old value, store the new one; result is the old value.
+    Value *Ptr = CI->getArgOperand(0);
+    Value *Val = CI->getArgOperand(1);
+
+    LoadInst *Orig = Builder.CreateLoad(Ptr);
+    Builder.CreateStore(Val, Ptr);
+
+    CI->replaceAllUsesWith(Orig);
+    break;
+  }
+
+  case Intrinsic::atomic_cmp_swap: {
+    // Compare-and-swap: store Val only if the current value equals Cmp;
+    // the result is the value that was in memory before the operation.
+    Value *Ptr = CI->getArgOperand(0);
+    Value *Cmp = CI->getArgOperand(1);
+    Value *Val = CI->getArgOperand(2);
+
+    LoadInst *Orig = Builder.CreateLoad(Ptr);
+    Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
+    Value *Res = Builder.CreateSelect(Equal, Val, Orig);
+    Builder.CreateStore(Res, Ptr);
+
+    CI->replaceAllUsesWith(Orig);
+    break;
+  }
+
+  default:
+    // Not an atomic intrinsic; leave the call alone.
+    return false;
+  }
+
+  // Every recognized case above either produced a replacement value or
+  // (memory_barrier) returns void, so the call must now be dead.
+  assert(CI->use_empty() &&
+         "Lowering should have eliminated any uses of the intrinsic call!");
+  CI->eraseFromParent();
+
+  return true;
+}
+
+// BasicBlockPass that scans each block and lowers every atomic intrinsic
+// call it finds via LowerAtomicIntrinsic().
+struct LowerAtomic : public BasicBlockPass {
+  static char ID;  // Pass identification; its address is the unique ID.
+  LowerAtomic() : BasicBlockPass(&ID) {}
+  bool runOnBasicBlock(BasicBlock &BB) {
+    bool Changed = false;
+    // Advance the iterator before lowering: LowerAtomicIntrinsic may erase
+    // the current instruction, which would otherwise invalidate DI.
+    for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) {
+      Instruction *Inst = DI++;
+      if (CallInst *CI = dyn_cast<CallInst>(Inst))
+        Changed |= LowerAtomicIntrinsic(CI);
+    }
+    return Changed;
+  }
+
+};
+
+}
+
+char LowerAtomic::ID = 0;
+// Register the pass under the -loweratomic command-line name.
+static RegisterPass<LowerAtomic>
+X("loweratomic", "Lower atomic intrinsics to non-atomic form");
+
+// Public factory, declared in Transforms/Scalar.h and referenced by
+// LinkAllPasses.h.
+Pass *llvm::createLowerAtomicPass() { return new LowerAtomic(); }
diff --git a/test/Transforms/LowerAtomic/atomic-load.ll b/test/Transforms/LowerAtomic/atomic-load.ll
new file mode 100644 (file)
index 0000000..5b110d6
--- /dev/null
@@ -0,0 +1,40 @@
+; RUN: opt < %s -loweratomic -S | FileCheck %s
+; Verify that atomic read-modify-write intrinsics are lowered to plain
+; load / op / store sequences and that each caller receives the value
+; loaded before the modification.
+
+declare i8 @llvm.atomic.load.add.i8.p0i8(i8* %ptr, i8 %delta)
+declare i8 @llvm.atomic.load.nand.i8.p0i8(i8* %ptr, i8 %delta)
+declare i8 @llvm.atomic.load.min.i8.p0i8(i8* %ptr, i8 %delta)
+
+; add lowers to a single binary operation between load and store.
+define i8 @add() {
+; CHECK: @add
+  %i = alloca i8
+  %j = call i8 @llvm.atomic.load.add.i8.p0i8(i8* %i, i8 42)
+; CHECK: [[INST:%[a-z0-9]+]] = load
+; CHECK-NEXT: add
+; CHECK-NEXT: store
+  ret i8 %j
+; CHECK: ret i8 [[INST]]
+}
+
+; nand lowers to and followed by a complement (xor with -1).
+define i8 @nand() {
+; CHECK: @nand
+  %i = alloca i8
+  %j = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* %i, i8 42)
+; CHECK: [[INST:%[a-z0-9]+]] = load
+; CHECK-NEXT: and
+; CHECK-NEXT: xor
+; CHECK-NEXT: store
+  ret i8 %j
+; CHECK: ret i8 [[INST]]
+}
+
+; min lowers to a compare and select.
+define i8 @min() {
+; CHECK: @min
+  %i = alloca i8
+  %j = call i8 @llvm.atomic.load.min.i8.p0i8(i8* %i, i8 42)
+; CHECK: [[INST:%[a-z0-9]+]] = load
+; CHECK-NEXT: icmp
+; CHECK-NEXT: select
+; CHECK-NEXT: store
+  ret i8 %j
+; CHECK: ret i8 [[INST]]
+}
diff --git a/test/Transforms/LowerAtomic/atomic-swap.ll b/test/Transforms/LowerAtomic/atomic-swap.ll
new file mode 100644 (file)
index 0000000..0a59c85
--- /dev/null
@@ -0,0 +1,26 @@
+; RUN: opt < %s -loweratomic -S | FileCheck %s
+; Verify lowering of swap and compare-and-swap: both become load/store
+; sequences, with cmp.swap adding an icmp + select, and the result is
+; the previously loaded value.
+
+declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %ptr, i8 %cmp, i8 %val)
+declare i8 @llvm.atomic.swap.i8.p0i8(i8* %ptr, i8 %val)
+
+define i8 @cmpswap() {
+; CHECK: @cmpswap
+  %i = alloca i8
+  %j = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %i, i8 0, i8 42)
+; CHECK: [[INST:%[a-z0-9]+]] = load
+; CHECK-NEXT: icmp
+; CHECK-NEXT: select
+; CHECK-NEXT: store
+  ret i8 %j
+; CHECK: ret i8 [[INST]]
+}
+
+define i8 @swap() {
+; CHECK: @swap
+  %i = alloca i8
+  %j = call i8 @llvm.atomic.swap.i8.p0i8(i8* %i, i8 42)
+; CHECK: [[INST:%[a-z0-9]+]] = load
+; CHECK-NEXT: store
+  ret i8 %j
+; CHECK: ret i8 [[INST]]
+}
diff --git a/test/Transforms/LowerAtomic/barrier.ll b/test/Transforms/LowerAtomic/barrier.ll
new file mode 100644 (file)
index 0000000..218c5ba
--- /dev/null
@@ -0,0 +1,10 @@
+; RUN: opt < %s -loweratomic -S | FileCheck %s
+; Verify that a memory barrier is deleted outright: the call must be
+; followed immediately by the ret in the lowered output.
+
+declare void @llvm.memory.barrier(i1 %ll, i1 %ls, i1 %sl, i1 %ss, i1 %device)
+
+define void @barrier() {
+; CHECK: @barrier
+  call void @llvm.memory.barrier(i1 0, i1 0, i1 0, i1 0, i1 0)
+; CHECK-NEXT: ret
+  ret void
+}
diff --git a/test/Transforms/LowerAtomic/dg.exp b/test/Transforms/LowerAtomic/dg.exp
new file mode 100644 (file)
index 0000000..f200589
--- /dev/null
@@ -0,0 +1,3 @@
+# Standard DejaGNU driver: run every .ll/.c/.cpp test in this directory.
+load_lib llvm.exp
+
+RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]