From 108ecf397578946a5f34fc90f22cdd0c9ca4448a Mon Sep 17 00:00:00 2001
From: Bill Wendling
Date: Tue, 19 Aug 2008 23:09:18 +0000
Subject: [PATCH] Add support for the __sync_sub_and_fetch atomics and friends
 for X86.

The code was already present, but not hooked up to anything.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55018 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/TargetSelectionDAG.td           | 31 +++++++++++++++++++---
 lib/Target/X86/X86Instr64bit.td            | 13 ++++++---
 lib/Target/X86/X86InstrInfo.td             | 16 +++++++++++
 test/CodeGen/X86/2008-08-19-SubAndFetch.ll | 11 ++++++++
 4 files changed, 65 insertions(+), 6 deletions(-)
 create mode 100644 test/CodeGen/X86/2008-08-19-SubAndFetch.ll

diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index 5dba0bc9c05..eaca86a232c 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -358,10 +358,10 @@ def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
@@ -815,6 +815,32 @@ def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
   return false;
 }]>;
 
+def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$dec),
+                    (atomic_load_sub node:$ptr, node:$dec), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getValueType(0) == MVT::i8;
+  return false;
+}]>;
+def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$dec),
+                    (atomic_load_sub node:$ptr, node:$dec), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getValueType(0) == MVT::i16;
+  return false;
+}]>;
+def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$dec),
+                    (atomic_load_sub node:$ptr, node:$dec), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getValueType(0) == MVT::i32;
+  return false;
+}]>;
+def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$dec),
+                    (atomic_load_sub node:$ptr, node:$dec), [{
+  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
+    return V->getValueType(0) == MVT::i64;
+  return false;
+}]>;
+
+
 def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
                     (atomic_swap node:$ptr, node:$inc), [{
   if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
@@ -841,7 +867,6 @@ def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
 }]>;
 
 
-
 // setcc convenience fragments.
 def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
                      (setcc node:$lhs, node:$rhs, SETOEQ)>;
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index d651b214cb6..81abc291fc1 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -1133,18 +1133,25 @@ def TLS_addr64 : I<0, Pseudo, (outs GR64:$dst), (ins i64imm:$sym),
 
 let Defs = [RAX, EFLAGS], Uses = [RAX] in {
 def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
-               "lock\n\tcmpxchgq $swap,$ptr",
+               "lock\n\tcmpxchgq\t$swap,$ptr",
                [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
 }
 
 let Constraints = "$val = $dst" in {
 let Defs = [EFLAGS] in
 def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
-               "lock\n\txadd $val, $ptr",
+               "lock\n\txadd\t$val, $ptr",
                [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
+
+let Defs = [EFLAGS] in
+def LXSUB64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
+               "lock\n\txadd\t$val, $ptr",
+               [(set GR64:$dst, (atomic_load_sub_64 addr:$ptr, GR64:$val))]>,
+                TB, LOCK;
+
 def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
-                  "xchg $val, $ptr",
+                  "xchg\t$val, $ptr",
                   [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
 }
 
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 37a5fed51c2..7b5ee91e9be 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -2634,6 +2634,22 @@ def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                TB, LOCK;
 }
 
+// Atomic exchange and subtract
+let Constraints = "$val = $dst", Defs = [EFLAGS] in {
+def LXSUB32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
+               "lock\n\txadd{l}\t{$val, $ptr|$ptr, $val}",
+               [(set GR32:$dst, (atomic_load_sub_32 addr:$ptr, GR32:$val))]>,
+               TB, LOCK;
+def LXSUB16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
+               "lock\n\txadd{w}\t{$val, $ptr|$ptr, $val}",
+               [(set GR16:$dst, (atomic_load_sub_16 addr:$ptr, GR16:$val))]>,
+               TB, OpSize, LOCK;
+def LXSUB8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
+               "lock\n\txadd{b}\t{$val, $ptr|$ptr, $val}",
+               [(set GR8:$dst, (atomic_load_sub_8 addr:$ptr, GR8:$val))]>,
+               TB, LOCK;
+}
+
 // Atomic exchange, and, or, xor
 let Constraints = "$val = $dst", Defs = [EFLAGS],
   usesCustomDAGSchedInserter = 1 in {
diff --git a/test/CodeGen/X86/2008-08-19-SubAndFetch.ll b/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
new file mode 100644
index 00000000000..00bcdf82e8d
--- /dev/null
+++ b/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
@@ -0,0 +1,11 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep xadd
+
+@var = external global i64             ; <i64*> [#uses=1]
+
+define i32 @main() nounwind {
+entry:
+       tail call i64 @llvm.atomic.load.sub.i64.p0i64( i64* @var, i64 1 )       ; <i64>:0 [#uses=0]
+       unreachable
+}
+
+declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
-- 
2.34.1
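
For reference, the builtin named in the subject is the GCC-style __sync_sub_and_fetch,
which atomically performs *ptr -= value and returns the new (post-subtract) value; the
test above reaches it through the old llvm.atomic.load.sub intrinsic and greps for an
xadd in the llc output. A minimal C usage sketch (illustrative only, not part of the
patch; the counter name and initial value are made up):

    /* refcount.c: drop one reference and report whether it was the last.
       Build with e.g. "cc -O2 refcount.c". */
    #include <stdio.h>

    static long refcount = 2;   /* hypothetical shared reference count */

    int main(void)
    {
        /* Atomically: refcount -= 1; returns the value *after* the subtract. */
        long remaining = __sync_sub_and_fetch(&refcount, 1L);

        if (remaining == 0)
            printf("last reference released\n");
        else
            printf("%ld reference(s) remain\n", remaining);
        return 0;
    }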