From: Jakob Stoklund Olesen
Date: Thu, 30 Jan 2014 04:48:46 +0000 (+0000)
Subject: Implement SPARCv9 atomic_swap_64 with a pseudo.
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=17ca0f8f80b034131d05233076c85e958572ad4d

Implement SPARCv9 atomic_swap_64 with a pseudo.

The SWAP instruction only exists in a 32-bit variant, but the 64-bit
atomic swap can be implemented in terms of CASX, like the other atomic
rmw primitives.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200453 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 75b0167acd5..da5788d8801 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1495,7 +1495,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
 
   if (Subtarget->is64Bit()) {
     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
-    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
+    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
   }
@@ -2874,6 +2874,9 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case SP::ATOMIC_LOAD_NAND_64:
     return expandAtomicRMW(MI, BB, SP::ANDXrr);
 
+  case SP::ATOMIC_SWAP_64:
+    return expandAtomicRMW(MI, BB, 0);
+
   case SP::ATOMIC_LOAD_MAX_32:
     return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
   case SP::ATOMIC_LOAD_MAX_64:
@@ -3012,7 +3015,8 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
 
   // Build the loop block.
   unsigned ValReg = MRI.createVirtualRegister(ValueRC);
-  unsigned UpdReg = MRI.createVirtualRegister(ValueRC);
+  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
+  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
 
   BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
     .addReg(Val0Reg).addMBB(MBB)
@@ -3024,7 +3028,7 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
     BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
-  } else {
+  } else if (Opcode) {
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg);
   }
diff --git a/lib/Target/Sparc/SparcInstr64Bit.td b/lib/Target/Sparc/SparcInstr64Bit.td
index 5f213342914..9146098a230 100644
--- a/lib/Target/Sparc/SparcInstr64Bit.td
+++ b/lib/Target/Sparc/SparcInstr64Bit.td
@@ -463,6 +463,14 @@
 defm ATOMIC_LOAD_MAX  : AtomicRMW<atomic_load_max_32, atomic_load_max_64>;
 defm ATOMIC_LOAD_UMIN : AtomicRMW<atomic_load_umin_32, atomic_load_umin_64>;
 defm ATOMIC_LOAD_UMAX : AtomicRMW<atomic_load_umax_32, atomic_load_umax_64>;
 
+// There is no 64-bit variant of SWAP, so use a pseudo.
+let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1,
+    Defs = [ICC], Predicates = [Is64Bit] in
+def ATOMIC_SWAP_64 : Pseudo<(outs I64Regs:$rd),
+                            (ins ptr_rc:$addr, I64Regs:$rs2), "",
+                            [(set i64:$rd,
+                                  (atomic_swap_64 iPTR:$addr, i64:$rs2))]>;
+
 // Global addresses, constant pool entries
 let Predicates = [Is64Bit] in {
diff --git a/test/CodeGen/SPARC/atomics.ll b/test/CodeGen/SPARC/atomics.ll
index 6d2b48287ca..b10336c9808 100644
--- a/test/CodeGen/SPARC/atomics.ll
+++ b/test/CodeGen/SPARC/atomics.ll
@@ -62,6 +62,15 @@ entry:
   ret i32 %b
 }
 
+; CHECK-LABEL: test_swap_i64
+; CHECK: casx [%o1],
+
+define i64 @test_swap_i64(i64 %a, i64* %ptr) {
+entry:
+  %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
+  ret i64 %b
+}
+
 ; CHECK-LABEL: test_load_add_32
 ; CHECK: membar
 ; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
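
For reference, the loop that expandAtomicRMW builds for ATOMIC_SWAP_64 is the
classic compare-and-swap retry loop. A minimal C++11 sketch of the same idea,
an exchange built on a CAS primitive such as CASX (the function name and the
relaxed memory order are illustrative, not part of the patch):

    #include <atomic>
    #include <cstdint>

    // Exchange implemented with compare-and-swap: keep proposing 'newval'
    // until no other thread has changed the location under us.
    uint64_t swap_via_cas(std::atomic<uint64_t> &loc, uint64_t newval) {
      // Load the current value; this plays the role of ValReg in the
      // pseudo's expansion (the value we expect CASX to find in memory).
      uint64_t old = loc.load(std::memory_order_relaxed);
      // On failure, compare_exchange_weak refreshes 'old' with the value
      // actually observed, much like the CASX result feeding the loop's PHI.
      while (!loc.compare_exchange_weak(old, newval,
                                        std::memory_order_relaxed))
        ;
      return old; // the value that was swapped out
    }

Because a swap stores the new value (Rs2Reg) back unchanged on every retry,
no update instruction is needed inside the loop, which is why the custom
inserter passes Opcode == 0 and reuses Rs2Reg as UpdReg instead of computing
a fresh value each iteration.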