Add support for the __sync_sub_and_fetch atomics and friends for X86. The code
was already present, but not hooked up to anything.

author: Bill Wendling <isanbard@gmail.com>
Tue, 19 Aug 2008 23:09:18 +0000 (23:09 +0000)
committer: Bill Wendling <isanbard@gmail.com>
Tue, 19 Aug 2008 23:09:18 +0000 (23:09 +0000)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55018 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/TargetSelectionDAG.td
lib/Target/X86/X86Instr64bit.td
lib/Target/X86/X86InstrInfo.td
test/CodeGen/X86/2008-08-19-SubAndFetch.ll [new file with mode: 0644]

index 5dba0bc9c058ac62795b801b0e0e8628b285c81a..eaca86a232cdea1fd7327f95f8f585f15d081efd 100644 (file)
@@ -358,10 +358,10 @@ def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_swap     : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap     : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_or  : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
@@ -815,6 +815,32 @@ def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
   return false;
 }]>;
 
+def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$dec),
+                    (atomic_load_sub node:$ptr, node:$dec), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+        return V->getValueType(0) == MVT::i8;
+  return false;
+}]>;
+def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$dec), 
+                    (atomic_load_sub node:$ptr, node:$dec), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+        return V->getValueType(0) == MVT::i16;
+  return false;
+}]>;
+def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$dec), 
+                    (atomic_load_sub node:$ptr, node:$dec), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+        return V->getValueType(0) == MVT::i32;
+  return false;
+}]>;
+def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$dec), 
+                    (atomic_load_sub node:$ptr, node:$dec), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+        return V->getValueType(0) == MVT::i64;
+  return false;
+}]>;
+
+
 def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
                     (atomic_swap node:$ptr, node:$inc), [{
   if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
@@ -841,7 +867,6 @@ def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
 }]>;
 
 
-
 // setcc convenience fragments.
 def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
                      (setcc node:$lhs, node:$rhs, SETOEQ)>;
index d651b214cb6442443553fa80c5c1ce9b440fb8ce..81abc291fc1c2ab2c5a2e98f6cb8f6bb9049ecf1 100644 (file)
@@ -1133,18 +1133,25 @@ def TLS_addr64 : I<0, Pseudo, (outs GR64:$dst), (ins i64imm:$sym),
 
 let Defs = [RAX, EFLAGS], Uses = [RAX] in {
 def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
-               "lock\n\tcmpxchgq $swap,$ptr",
+               "lock\n\tcmpxchgq\t$swap,$ptr",
                [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
 }
 
 let Constraints = "$val = $dst" in {
 let Defs = [EFLAGS] in
 def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
-               "lock\n\txadd $val, $ptr", 
+               "lock\n\txadd\t$val, $ptr", 
                [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
+
+let Defs = [EFLAGS] in
+def LXSUB64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
+               "lock\n\txadd\t$val, $ptr", 
+               [(set GR64:$dst, (atomic_load_sub_64 addr:$ptr, GR64:$val))]>,
+                TB, LOCK;
+
 def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
-                  "xchg $val, $ptr", 
+                  "xchg\t$val, $ptr", 
                   [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
 }
 
index 37a5fed51c27a246d30759942682422b9a36c14d..7b5ee91e9bee997ebc63973cb2ce19e577f3a202 100644 (file)
@@ -2634,6 +2634,22 @@ def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                 TB, LOCK;
 }
 
+// Atomic exchange and subtract
+let Constraints = "$val = $dst", Defs = [EFLAGS] in {
+def LXSUB32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
+               "lock\n\txadd{l}\t{$val, $ptr|$ptr, $val}", 
+               [(set GR32:$dst, (atomic_load_sub_32 addr:$ptr, GR32:$val))]>,
+                TB, LOCK;
+def LXSUB16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
+               "lock\n\txadd{w}\t{$val, $ptr|$ptr, $val}", 
+               [(set GR16:$dst, (atomic_load_sub_16 addr:$ptr, GR16:$val))]>,
+                TB, OpSize, LOCK;
+def LXSUB8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
+               "lock\n\txadd{b}\t{$val, $ptr|$ptr, $val}", 
+               [(set GR8:$dst, (atomic_load_sub_8 addr:$ptr, GR8:$val))]>,
+                TB, LOCK;
+}
+
 // Atomic exchange, and, or, xor
 let Constraints = "$val = $dst", Defs = [EFLAGS],
                   usesCustomDAGSchedInserter = 1 in {
diff --git a/test/CodeGen/X86/2008-08-19-SubAndFetch.ll b/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
new file mode 100644 (file)
index 0000000..00bcdf8
--- /dev/null
@@ -0,0 +1,11 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep xadd
+
+@var = external global i64             ; <i64*> [#uses=1]
+
+define i32 @main() nounwind {
+entry:
+       tail call i64 @llvm.atomic.load.sub.i64.p0i64( i64* @var, i64 1 )               ; <i64>:0 [#uses=0]
+       unreachable
+}
+
+declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind