From b13f72aa2daaf9988aeb599dcdbe15ba9e5f82ee Mon Sep 17 00:00:00 2001
From: Krzysztof Parzyszek
Date: Thu, 9 Jul 2015 14:51:21 +0000
Subject: [PATCH] [Hexagon] Add support for atomic RMW operations

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@241804 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/Hexagon/HexagonISelLowering.cpp  | 42 ++++++++++++
 lib/Target/Hexagon/HexagonISelLowering.h    | 15 +++++
 lib/Target/Hexagon/HexagonTargetMachine.cpp |  3 +-
 test/CodeGen/Hexagon/Atomics.ll             | 71 +++++++++++++++++++++
 4 files changed, 130 insertions(+), 1 deletion(-)
 create mode 100644 test/CodeGen/Hexagon/Atomics.ll

diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index ec6010cbd42..c739afb70c1 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2466,3 +2466,45 @@ bool llvm::isPositiveHalfWord(SDNode *N) {
     return true;
   }
 }
+
+Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+      AtomicOrdering Ord) const {
+  BasicBlock *BB = Builder.GetInsertBlock();
+  Module *M = BB->getParent()->getParent();
+  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
+  unsigned SZ = Ty->getPrimitiveSizeInBits();
+  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
+  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
+                                   : Intrinsic::hexagon_L4_loadd_locked;
+  Value *Fn = Intrinsic::getDeclaration(M, IntID);
+  return Builder.CreateCall(Fn, Addr, "larx");
+}
+
+/// Perform a store-conditional operation to Addr. Return the status of the
+/// store. This should be 0 if the store succeeded, non-zero otherwise.
+Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
+      Value *Val, Value *Addr, AtomicOrdering Ord) const {
+  BasicBlock *BB = Builder.GetInsertBlock();
+  Module *M = BB->getParent()->getParent();
+  Type *Ty = Val->getType();
+  unsigned SZ = Ty->getPrimitiveSizeInBits();
+  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
+  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
+                                   : Intrinsic::hexagon_S4_stored_locked;
+  Value *Fn = Intrinsic::getDeclaration(M, IntID);
+  Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
+  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
+  Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
+  return Ext;
+}
+
+bool HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+  // Do not expand loads and stores that don't exceed 64 bits.
+  return LI->getType()->getPrimitiveSizeInBits() > 64;
+}
+
+bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+  // Do not expand loads and stores that don't exceed 64 bits.
+  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
+}
+
diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h
index ba8b9cd9fe5..2642abffadd 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/lib/Target/Hexagon/HexagonISelLowering.h
@@ -207,6 +207,21 @@ bool isPositiveHalfWord(SDNode *N);
     /// compare a register against the immediate without having to materialize
     /// the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;
+
+    // Handling of atomic RMW instructions.
+    bool hasLoadLinkedStoreConditional() const override {
+      return true;
+    }
+    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+        AtomicOrdering Ord) const override;
+    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+        Value *Addr, AtomicOrdering Ord) const override;
+    bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+    AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI)
+        const override {
+      return AtomicRMWExpansionKind::LLSC;
+    }
   };
 
 } // end namespace llvm
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 59007be934e..7d9d02d49f4 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -144,8 +144,9 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) {
 void HexagonPassConfig::addIRPasses() {
   TargetPassConfig::addIRPasses();
   bool NoOpt = (getOptLevel() == CodeGenOpt::None);
-
+
+  addPass(createAtomicExpandPass(TM));
   if (!NoOpt && EnableCommGEP)
     addPass(createHexagonCommonGEP());
 }
 
diff --git a/test/CodeGen/Hexagon/Atomics.ll b/test/CodeGen/Hexagon/Atomics.ll
new file mode 100644
index 00000000000..bbac5d73c86
--- /dev/null
+++ b/test/CodeGen/Hexagon/Atomics.ll
@@ -0,0 +1,71 @@
+; RUN: llc < %s -march=hexagon
+
+@si = common global i32 0, align 4
+@sll = common global i64 0, align 8
+
+define void @test_op_ignore() nounwind {
+entry:
+  %t00 = atomicrmw add i32* @si, i32 1 monotonic
+  %t01 = atomicrmw add i64* @sll, i64 1 monotonic
+  %t10 = atomicrmw sub i32* @si, i32 1 monotonic
+  %t11 = atomicrmw sub i64* @sll, i64 1 monotonic
+  %t20 = atomicrmw or i32* @si, i32 1 monotonic
+  %t21 = atomicrmw or i64* @sll, i64 1 monotonic
+  %t30 = atomicrmw xor i32* @si, i32 1 monotonic
+  %t31 = atomicrmw xor i64* @sll, i64 1 monotonic
+  %t40 = atomicrmw and i32* @si, i32 1 monotonic
+  %t41 = atomicrmw and i64* @sll, i64 1 monotonic
+  %t50 = atomicrmw nand i32* @si, i32 1 monotonic
+  %t51 = atomicrmw nand i64* @sll, i64 1 monotonic
+  br label %return
+
+return:                                           ; preds = %entry
+  ret void
+}
+
+define void @test_fetch_and_op() nounwind {
+entry:
+  %t00 = atomicrmw add i32* @si, i32 11 monotonic
+  store i32 %t00, i32* @si, align 4
+  %t01 = atomicrmw add i64* @sll, i64 11 monotonic
+  store i64 %t01, i64* @sll, align 8
+  %t10 = atomicrmw sub i32* @si, i32 11 monotonic
+  store i32 %t10, i32* @si, align 4
+  %t11 = atomicrmw sub i64* @sll, i64 11 monotonic
+  store i64 %t11, i64* @sll, align 8
+  %t20 = atomicrmw or i32* @si, i32 11 monotonic
+  store i32 %t20, i32* @si, align 4
+  %t21 = atomicrmw or i64* @sll, i64 11 monotonic
+  store i64 %t21, i64* @sll, align 8
+  %t30 = atomicrmw xor i32* @si, i32 11 monotonic
+  store i32 %t30, i32* @si, align 4
+  %t31 = atomicrmw xor i64* @sll, i64 11 monotonic
+  store i64 %t31, i64* @sll, align 8
+  %t40 = atomicrmw and i32* @si, i32 11 monotonic
+  store i32 %t40, i32* @si, align 4
+  %t41 = atomicrmw and i64* @sll, i64 11 monotonic
+  store i64 %t41, i64* @sll, align 8
+  %t50 = atomicrmw nand i32* @si, i32 11 monotonic
+  store i32 %t50, i32* @si, align 4
+  %t51 = atomicrmw nand i64* @sll, i64 11 monotonic
+  store i64 %t51, i64* @sll, align 8
+  br label %return
+
+return:                                           ; preds = %entry
+  ret void
+}
+
+define void @test_lock() nounwind {
+entry:
+  %t00 = atomicrmw xchg i32* @si, i32 1 monotonic
+  store i32 %t00, i32* @si, align 4
+  %t01 = atomicrmw xchg i64* @sll, i64 1 monotonic
+  store i64 %t01, i64* @sll, align 8
+  fence seq_cst
+  store volatile i32 0, i32* @si, align 4
+  store volatile i64 0, i64* @sll, align 8
+  br label %return
+
+return:                                           ; preds = %entry
+  ret void
+}
-- 
2.34.1
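
Illustrative sketch (not part of the patch): with the hooks above in place,
the AtomicExpand pass added in HexagonPassConfig::addIRPasses rewrites each
atomicrmw into a load-locked / store-conditional retry loop built from
emitLoadLinked and emitStoreConditional. A minimal sketch of the resulting IR
for a 32-bit atomicrmw add is shown below; the value names, block labels, and
the i32*-typed intrinsic declarations are assumptions for illustration, not
output copied from the pass.

    define i32 @sketch_atomic_add(i32* %p, i32 %v) {
    entry:
      br label %atomicrmw.start

    atomicrmw.start:
      ; emitLoadLinked: 32-bit load-locked, returns the current value
      %larx = call i32 @llvm.hexagon.L2.loadw.locked(i32* %p)
      %new = add i32 %larx, %v
      ; emitStoreConditional: succeeds only if the reservation still holds
      %stcx = call i32 @llvm.hexagon.S2.storew.locked(i32* %p, i32 %new)
      ; the hook maps the intrinsic result to "0 means success" via icmp+zext
      %cmp = icmp eq i32 %stcx, 0
      %status = zext i1 %cmp to i32
      %tryagain = icmp ne i32 %status, 0
      br i1 %tryagain, label %atomicrmw.start, label %atomicrmw.end

    atomicrmw.end:
      ret i32 %larx                  ; the value observed before the update
    }

    ; assumed signatures for the sketch
    declare i32 @llvm.hexagon.L2.loadw.locked(i32*)
    declare i32 @llvm.hexagon.S2.storew.locked(i32*, i32)

For 64-bit operations the same loop would be built from L4_loadd_locked and
S4_stored_locked, which is why shouldExpandAtomicLoadInIR and
shouldExpandAtomicStoreInIR only force expansion of accesses wider than
64 bits.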