From: Tim Northover
Date: Tue, 1 Jul 2014 21:44:59 +0000 (+0000)
Subject: X86: delegate expanding atomic libcalls to generic code.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=99ec36c684bd658b564d7db50463917a754daaab;p=oota-llvm.git

X86: delegate expanding atomic libcalls to generic code.

On targets without cmpxchg16b or cmpxchg8b, the borderline atomic
operations were slipping through the gaps.

X86AtomicExpand.cpp was delegating to ISelLowering. Generic
ISelLowering was delegating to X86ISelLowering and X86ISelLowering was
asserting. The correct behaviour is to expand to a libcall, preferably
in generic ISelLowering.

This can be achieved by X86ISelLowering deciding it doesn't want the
faff after all.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@212134 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 10538ffc27f..fe8cebca942 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -16360,6 +16360,20 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(EFLAGS.getValue(1));
     return;
   }
+  case ISD::ATOMIC_SWAP:
+  case ISD::ATOMIC_LOAD_ADD:
+  case ISD::ATOMIC_LOAD_SUB:
+  case ISD::ATOMIC_LOAD_AND:
+  case ISD::ATOMIC_LOAD_OR:
+  case ISD::ATOMIC_LOAD_XOR:
+  case ISD::ATOMIC_LOAD_NAND:
+  case ISD::ATOMIC_LOAD_MIN:
+  case ISD::ATOMIC_LOAD_MAX:
+  case ISD::ATOMIC_LOAD_UMIN:
+  case ISD::ATOMIC_LOAD_UMAX:
+    // Delegate to generic TypeLegalization. Situations we can really handle
+    // should have already been dealt with by X86AtomicExpand.cpp.
+    break;
   case ISD::ATOMIC_LOAD: {
     ReplaceATOMIC_LOAD(N, Results, DAG);
     return;
diff --git a/test/CodeGen/X86/atomic-ops-ancient-64.ll b/test/CodeGen/X86/atomic-ops-ancient-64.ll
new file mode 100644
index 00000000000..18749b90287
--- /dev/null
+++ b/test/CodeGen/X86/atomic-ops-ancient-64.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s
+
+define i64 @test_add(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_add:
+; CHECK: calll __sync_fetch_and_add_8
+  %old = atomicrmw add i64* %addr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_sub(i64* %addr, i64 %inc) {
+; CHECK-LABEL: test_sub:
+; CHECK: calll __sync_fetch_and_sub_8
+  %old = atomicrmw sub i64* %addr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_and(i64* %andr, i64 %inc) {
+; CHECK-LABEL: test_and:
+; CHECK: calll __sync_fetch_and_and_8
+  %old = atomicrmw and i64* %andr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_or(i64* %orr, i64 %inc) {
+; CHECK-LABEL: test_or:
+; CHECK: calll __sync_fetch_and_or_8
+  %old = atomicrmw or i64* %orr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_xor(i64* %xorr, i64 %inc) {
+; CHECK-LABEL: test_xor:
+; CHECK: calll __sync_fetch_and_xor_8
+  %old = atomicrmw xor i64* %xorr, i64 %inc seq_cst
+  ret i64 %old
+}
+
+define i64 @test_nand(i64* %nandr, i64 %inc) {
+; CHECK-LABEL: test_nand:
+; CHECK: calll __sync_fetch_and_nand_8
+  %old = atomicrmw nand i64* %nandr, i64 %inc seq_cst
+  ret i64 %old
+}
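
For illustration only (not part of the commit): a minimal C++ sketch of source code that
exercises the path covered by the new test. The function name fetch_add64 and the suggested
flags (-m32 -march=i386, i.e. a target without cmpxchg8b) are assumptions for the example.
The 64-bit __sync_fetch_and_add builtin becomes an "atomicrmw add i64" in IR, which with this
change is expanded to a call to the __sync_fetch_and_add_8 runtime helper instead of tripping
the X86ISelLowering assertion.

    #include <stdint.h>

    // Hypothetical helper: with no cmpxchg8b available, the 64-bit
    // fetch-and-add cannot be lowered inline, so the backend emits a
    // call to __sync_fetch_and_add_8 (provided by the runtime library).
    int64_t fetch_add64(int64_t *p, int64_t inc) {
      return __sync_fetch_and_add(p, inc);
    }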