X86: remove temporary atomicrmw used during lowering.
author     Tim Northover <tnorthover@apple.com>
           Mon, 14 Jul 2014 15:31:13 +0000 (15:31 +0000)
committer  Tim Northover <tnorthover@apple.com>
           Mon, 14 Jul 2014 15:31:13 +0000 (15:31 +0000)
We construct a temporary "atomicrmw xchg" instruction when lowering atomic
stores of widths that aren't supported natively. That instruction isn't on the
pass's top-level worklist, though, so it won't be removed automatically; we
have to erase it ourselves once it has been lowered.
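For reference, here is a condensed sketch of the pattern being fixed,
reconstructed from the hunk below. The shouldExpand/expand callbacks stand in
for the pass's shouldExpandAtomicRMW and expandAtomicRMW members, so treat the
names and signatures as illustrative rather than the exact pass source:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Sketch: lower a wide atomic store by rewriting it as an
    // "atomicrmw xchg", then expanding that swap.
    static bool lowerWideAtomicStore(StoreInst *SI,
                                     bool (*shouldExpand)(AtomicRMWInst *),
                                     bool (*expand)(AtomicRMWInst *)) {
      IRBuilder<> Builder(SI);
      // An unordered store still needs at least a monotonic swap.
      AtomicOrdering Order =
          SI->getOrdering() == Unordered ? Monotonic : SI->getOrdering();

      // The temporary instruction: it replaces the store, but it is not on
      // the pass's top-level worklist, so nothing else will clean it up.
      AtomicRMWInst *AI =
          Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                                  SI->getValueOperand(), Order);
      SI->eraseFromParent();

      // Now we have an appropriate swap instruction, lower it as usual.
      if (shouldExpand(AI)) {
        expand(AI);
        // The fix: erase the now-dead temporary swap explicitly.
        AI->eraseFromParent();
        return true;
      }

      // Otherwise the xchg itself is the lowered form; keep it.
      return true;
    }

Without the explicit erase, the dead swap survives to instruction selection
and is emitted as the ___sync_lock_test_and_set_16 libcall that the new
CHECK-NOT line below guards against.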

Thanks Saleem for pointing this out!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@212948 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/X86/X86AtomicExpandPass.cpp
test/CodeGen/X86/atomic128.ll

index 1637b55b6d353e98946979d862a214c962a95817..61eefbbf75b19f090d5feee2c9267d295d56dc94 100644 (file)
@@ -277,8 +277,11 @@ bool X86AtomicExpandPass::expandAtomicStore(StoreInst *SI) {
                               SI->getValueOperand(), Order);
 
   // Now we have an appropriate swap instruction, lower it as usual.
-  if (shouldExpandAtomicRMW(AI))
-    return expandAtomicRMW(AI);
+  if (shouldExpandAtomicRMW(AI)) {
+    expandAtomicRMW(AI);
+    AI->eraseFromParent();
+    return true;
+  }
 
   return AI;
 }
index ddc53a532028b0827013df7c854caa53c55773fa..741d2904229d73f0836d11b0ca25358c5d4a2da9 100644 (file)
@@ -277,6 +277,7 @@ define void @atomic_store_seq_cst(i128* %p, i128 %in) {
 ; CHECK:         lock
 ; CHECK:         cmpxchg16b (%rdi)
 ; CHECK:         jne [[LOOP]]
+; CHECK-NOT:     callq ___sync_lock_test_and_set_16
 
    store atomic i128 %in, i128* %p seq_cst, align 16
    ret void