From 968e1f2f5b6f9379ce5c222841ca842060354e8e Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Fri, 10 Oct 2014 22:01:59 +0000 Subject: [PATCH] R600/SI: Add load / store machine optimizer pass. Currently this only functions to match simple cases where ds_read2_* / ds_write2_* instructions can be used. In the future it might match some of the other weird load patterns, such as direct to LDS loads. Currently enabled only with a subtarget feature to enable easier testing. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@219533 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/R600/AMDGPU.h | 4 + lib/Target/R600/AMDGPU.td | 6 + lib/Target/R600/AMDGPUSubtarget.cpp | 2 +- lib/Target/R600/AMDGPUSubtarget.h | 5 + lib/Target/R600/AMDGPUTargetMachine.cpp | 11 + lib/Target/R600/CMakeLists.txt | 1 + lib/Target/R600/SILoadStoreOptimizer.cpp | 375 ++++++++++++++ ...ds-negative-offset-addressing-mode-loop.ll | 10 +- test/CodeGen/R600/ds_read2.ll | 461 ++++++++++++++++++ test/CodeGen/R600/ds_write2.ll | 378 ++++++++++++++ 10 files changed, 1246 insertions(+), 7 deletions(-) create mode 100644 lib/Target/R600/SILoadStoreOptimizer.cpp create mode 100644 test/CodeGen/R600/ds_read2.ll create mode 100644 test/CodeGen/R600/ds_write2.ll diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h index ff4d6b475ec..5bc1276ffec 100644 --- a/lib/Target/R600/AMDGPU.h +++ b/lib/Target/R600/AMDGPU.h @@ -40,6 +40,7 @@ FunctionPass *createSITypeRewriter(); FunctionPass *createSIAnnotateControlFlowPass(); FunctionPass *createSILowerI1CopiesPass(); FunctionPass *createSIShrinkInstructionsPass(); +FunctionPass *createSILoadStoreOptimizerPass(TargetMachine &tm); FunctionPass *createSILowerControlFlowPass(TargetMachine &tm); FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm); FunctionPass *createSIFixSGPRLiveRangesPass(); @@ -49,6 +50,9 @@ FunctionPass *createSIInsertWaits(TargetMachine &tm); void initializeSILowerI1CopiesPass(PassRegistry &); extern char &SILowerI1CopiesID; +void initializeSILoadStoreOptimizerPass(PassRegistry &); +extern char &SILoadStoreOptimizerID; + // Passes common to R600 and SI FunctionPass *createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST); Pass *createAMDGPUStructurizeCFGPass(); diff --git a/lib/Target/R600/AMDGPU.td b/lib/Target/R600/AMDGPU.td index 0bff35e4dee..f5930f5e485 100644 --- a/lib/Target/R600/AMDGPU.td +++ b/lib/Target/R600/AMDGPU.td @@ -81,6 +81,12 @@ def FeatureCFALUBug : SubtargetFeature<"cfalubug", "true", "GPU has CF_ALU bug">; +// XXX - This should probably be removed once enabled by default +def FeatureEnableLoadStoreOpt : SubtargetFeature <"load-store-opt", + "EnableLoadStoreOpt", + "true", + "Enable SI load/store optimizer pass">; + def FeatureFlatAddressSpace : SubtargetFeature<"flat-address-space", "FlatAddressSpace", "true", diff --git a/lib/Target/R600/AMDGPUSubtarget.cpp b/lib/Target/R600/AMDGPUSubtarget.cpp index bcafee51ae8..9d09a196370 100644 --- a/lib/Target/R600/AMDGPUSubtarget.cpp +++ b/lib/Target/R600/AMDGPUSubtarget.cpp @@ -79,7 +79,7 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef GPU, StringRef FS, FP64Denormals(false), FP32Denormals(false), CaymanISA(false), FlatAddressSpace(false), EnableIRStructurizer(true), EnablePromoteAlloca(false), EnableIfCvt(true), - WavefrontSize(0), CFALUBug(false), LocalMemorySize(0), + EnableLoadStoreOpt(false), WavefrontSize(0), CFALUBug(false), LocalMemorySize(0), DL(computeDataLayout(initializeSubtargetDependencies(GPU, FS))), FrameLowering(TargetFrameLowering::StackGrowsUp, 64 * 16, // 
Maximum stack alignment (long16) diff --git a/lib/Target/R600/AMDGPUSubtarget.h b/lib/Target/R600/AMDGPUSubtarget.h index 679797219dc..55a0c586d72 100644 --- a/lib/Target/R600/AMDGPUSubtarget.h +++ b/lib/Target/R600/AMDGPUSubtarget.h @@ -60,6 +60,7 @@ private: bool EnableIRStructurizer; bool EnablePromoteAlloca; bool EnableIfCvt; + bool EnableLoadStoreOpt; unsigned WavefrontSize; bool CFALUBug; int LocalMemorySize; @@ -180,6 +181,10 @@ public: return EnableIfCvt; } + bool loadStoreOptEnabled() const { + return EnableLoadStoreOpt; + } + unsigned getWavefrontSize() const { return WavefrontSize; } diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp index c95a9410ff6..1b4fe832f20 100644 --- a/lib/Target/R600/AMDGPUTargetMachine.cpp +++ b/lib/Target/R600/AMDGPUTargetMachine.cpp @@ -148,6 +148,17 @@ bool AMDGPUPassConfig::addPreRegAlloc() { // SIFixSGPRCopies can generate a lot of duplicate instructions, // so we need to run MachineCSE afterwards. addPass(&MachineCSEID); + + if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) { + // Don't do this with no optimizations since it throws away debug info by + // merging nonadjacent loads. + + // This should be run after scheduling, but before register allocation. It + // also need extra copies to the address operand to be eliminated. + initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry()); + insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID); + } + addPass(createSIShrinkInstructionsPass()); addPass(createSIFixSGPRLiveRangesPass()); } diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/R600/CMakeLists.txt index c5f4680d49c..021ce5fc863 100644 --- a/lib/Target/R600/CMakeLists.txt +++ b/lib/Target/R600/CMakeLists.txt @@ -44,6 +44,7 @@ add_llvm_target(R600CodeGen SIInsertWaits.cpp SIInstrInfo.cpp SIISelLowering.cpp + SILoadStoreOptimizer.cpp SILowerControlFlow.cpp SILowerI1Copies.cpp SIMachineFunctionInfo.cpp diff --git a/lib/Target/R600/SILoadStoreOptimizer.cpp b/lib/Target/R600/SILoadStoreOptimizer.cpp new file mode 100644 index 00000000000..7b41cde1ec0 --- /dev/null +++ b/lib/Target/R600/SILoadStoreOptimizer.cpp @@ -0,0 +1,375 @@ +//===-- SILoadStoreOptimizer.cpp ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass tries to fuse DS instructions with close by immediate offsets. +// This will fuse operations such as +// ds_read_b32 v0, v2 offset:16 +// ds_read_b32 v1, v2 offset:32 +// ==> +// ds_read2_b32 v[0:1], v2, offset0:4 offset1:8 +// +// +// Future improvements: +// +// - This currently relies on the scheduler to place loads and stores next to +// each other, and then only merges adjacent pairs of instructions. It would +// be good to be more flexible with interleaved instructions, and possibly run +// before scheduling. It currently missing stores of constants because loading +// the constant into the data register is placed between the stores, although +// this is arguably a scheduling problem. +// +// - Live interval recomputing seems inefficient. This currently only matches +// one pair, and recomputes live intervals and moves on to the next pair. It +// would be better to compute a list of all merges that need to occur +// +// - With a list of instructions to process, we can also merge more. 
If a +// cluster of loads have offsets that are too large to fit in the 8-bit +// offsets, but are close enough to fit in the 8 bits, we can add to the base +// pointer and use the new reduced offsets. +// +//===----------------------------------------------------------------------===// + +#include "AMDGPU.h" +#include "SIInstrInfo.h" +#include "SIRegisterInfo.h" +#include "llvm/CodeGen/LiveIntervalAnalysis.h" +#include "llvm/CodeGen/LiveVariables.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Support/Debug.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "si-load-store-opt" + +namespace { + +class SILoadStoreOptimizer : public MachineFunctionPass { +private: + const TargetMachine *TM; + const SIInstrInfo *TII; + const SIRegisterInfo *TRI; + MachineRegisterInfo *MRI; + LiveIntervals *LIS; + + + static bool offsetsCanBeCombined(unsigned Offset0, + unsigned Offset1, + unsigned EltSize); + + MachineBasicBlock::iterator findMatchingDSInst(MachineBasicBlock::iterator I, + unsigned EltSize); + + void updateRegDefsUses(unsigned SrcReg, + unsigned DstReg, + unsigned SubIdx); + + MachineBasicBlock::iterator mergeRead2Pair( + MachineBasicBlock::iterator I, + MachineBasicBlock::iterator Paired, + unsigned EltSize, + const MCInstrDesc &Read2InstDesc); + + MachineBasicBlock::iterator mergeWrite2Pair( + MachineBasicBlock::iterator I, + MachineBasicBlock::iterator Paired, + unsigned EltSize, + const MCInstrDesc &Write2InstDesc); + +public: + static char ID; + + SILoadStoreOptimizer() : + MachineFunctionPass(ID), + TM(nullptr), + TII(nullptr), + TRI(nullptr), + MRI(nullptr), + LIS(nullptr) { + + } + + SILoadStoreOptimizer(const TargetMachine &TM_) : + MachineFunctionPass(ID), + TM(&TM_), + TII(static_cast(TM->getSubtargetImpl()->getInstrInfo())) { + initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry()); + } + + bool optimizeBlock(MachineBasicBlock &MBB); + + bool runOnMachineFunction(MachineFunction &MF) override; + + const char *getPassName() const override { + return "SI Load / Store Optimizer"; + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesCFG(); + AU.addPreserved(); + AU.addPreserved(); + AU.addPreserved(); + AU.addRequired(); + + MachineFunctionPass::getAnalysisUsage(AU); + } +}; + +} // End anonymous namespace. + +INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE, + "SI Load / Store Optimizer", false, false) +INITIALIZE_PASS_DEPENDENCY(LiveIntervals) +INITIALIZE_PASS_DEPENDENCY(LiveVariables) +INITIALIZE_PASS_DEPENDENCY(SlotIndexes) +INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, + "SI Load / Store Optimizer", false, false) + +char SILoadStoreOptimizer::ID = 0; + +char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID; + +FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) { + return new SILoadStoreOptimizer(TM); +} + +bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0, + unsigned Offset1, + unsigned EltSize) { + // XXX - Would the same offset be OK? Is there any reason this would happen or + // be useful? 
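+  //
+  // A worked example of what this checks, matching the example in the file
+  // header: the incoming Offset0/Offset1 are byte offsets taken from the DS
+  // instructions, and the read2/write2 forms encode them in units of EltSize.
+  // With EltSize == 4, byte offsets 16 and 32 become offset0:4 and offset1:8.
+  // Each encoded offset must fit in an 8-bit field, so the largest mergeable
+  // byte offset is 255 * EltSize.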
+ return (Offset0 != Offset1) && + isUInt<8>(Offset0 / EltSize) && + isUInt<8>(Offset1 / EltSize); +} + +MachineBasicBlock::iterator +SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I, + unsigned EltSize){ + MachineBasicBlock::iterator E = I->getParent()->end(); + MachineBasicBlock::iterator MBBI = I; + ++MBBI; + + if (MBBI->getOpcode() != I->getOpcode()) + return E; + + // Don't merge volatiles. + if (MBBI->hasOrderedMemoryRef()) + return E; + + int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr); + const MachineOperand &AddrReg0 = I->getOperand(AddrIdx); + const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx); + + // Check same base pointer. Be careful of subregisters, which can occur with + // vectors of pointers. + if (AddrReg0.getReg() == AddrReg1.getReg() && + AddrReg0.getSubReg() == AddrReg1.getSubReg()) { + int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), + AMDGPU::OpName::offset); + unsigned Offset0 = I->getOperand(OffsetIdx).getImm(); + unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm(); + + // Check both offsets fit in the reduced range. + if (offsetsCanBeCombined(Offset0, Offset1, EltSize)) + return MBBI; + } + + return E; +} + +void SILoadStoreOptimizer::updateRegDefsUses(unsigned SrcReg, + unsigned DstReg, + unsigned SubIdx) { + for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg), + E = MRI->reg_end(); I != E; ) { + MachineOperand &O = *I; + ++I; + O.substVirtReg(DstReg, SubIdx, *TRI); + } +} + +MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair( + MachineBasicBlock::iterator I, + MachineBasicBlock::iterator Paired, + unsigned EltSize, + const MCInstrDesc &Read2InstDesc) { + MachineBasicBlock *MBB = I->getParent(); + + // Be careful, since the addresses could be subregisters themselves in weird + // cases, like vectors of pointers. + const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr); + + unsigned DestReg0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst)->getReg(); + unsigned DestReg1 + = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst)->getReg(); + + unsigned Offset0 = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm(); + unsigned Offset1 + = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm(); + + const TargetRegisterClass *SuperRC + = (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass; + unsigned DestReg = MRI->createVirtualRegister(SuperRC); + + DebugLoc DL = I->getDebugLoc(); + MachineInstrBuilder Read2 + = BuildMI(*MBB, I, DL, Read2InstDesc, DestReg) + .addImm(0) // gds + .addOperand(*AddrReg) // addr + .addImm(Offset0 / EltSize) // offset0 + .addImm(Offset1 / EltSize) // offset1 + .addMemOperand(*I->memoperands_begin()) + .addMemOperand(*Paired->memoperands_begin()); + + LIS->InsertMachineInstrInMaps(Read2); + + unsigned SubRegIdx0 = (EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1; + unsigned SubRegIdx1 = (EltSize == 4) ? 
AMDGPU::sub1 : AMDGPU::sub2_sub3; + updateRegDefsUses(DestReg0, DestReg, SubRegIdx0); + updateRegDefsUses(DestReg1, DestReg, SubRegIdx1); + + LIS->RemoveMachineInstrFromMaps(I); + LIS->RemoveMachineInstrFromMaps(Paired); + I->eraseFromParent(); + Paired->eraseFromParent(); + + LiveInterval &AddrRegLI = LIS->getInterval(AddrReg->getReg()); + LIS->shrinkToUses(&AddrRegLI); + + LIS->getInterval(DestReg); // Create new LI + + DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n'); + return Read2; +} + +MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair( + MachineBasicBlock::iterator I, + MachineBasicBlock::iterator Paired, + unsigned EltSize, + const MCInstrDesc &Write2InstDesc) { + MachineBasicBlock *MBB = I->getParent(); + + // Be sure to use .addOperand(), and not .addReg() with these. We want to be + // sure we preserve the subregister index and any register flags set on them. + const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr); + const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0); + const MachineOperand *Data1 + = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0); + + unsigned Offset0 = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm(); + unsigned Offset1 + = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm(); + + DebugLoc DL = I->getDebugLoc(); + MachineInstrBuilder Write2 + = BuildMI(*MBB, I, DL, Write2InstDesc) + .addImm(0) // gds + .addOperand(*Addr) // addr + .addOperand(*Data0) // data0 + .addOperand(*Data1) // data1 + .addImm(Offset0 / EltSize) // offset0 + .addImm(Offset1 / EltSize) // offset1 + .addMemOperand(*I->memoperands_begin()) + .addMemOperand(*Paired->memoperands_begin()); + + // XXX - How do we express subregisters here? + unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg() }; + + LIS->RemoveMachineInstrFromMaps(I); + LIS->RemoveMachineInstrFromMaps(Paired); + I->eraseFromParent(); + Paired->eraseFromParent(); + + LIS->repairIntervalsInRange(MBB, Write2, Write2, OrigRegs); + + DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n'); + return Write2; +} + +// Scan through looking for adjacent LDS operations with constant offsets from +// the same base register. We rely on the scheduler to do the hard work of +// clustering nearby loads, and assume these are all adjacent. +bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) { + const MCInstrDesc &Read2B32Desc = TII->get(AMDGPU::DS_READ2_B32); + const MCInstrDesc &Read2B64Desc = TII->get(AMDGPU::DS_READ2_B64); + const MCInstrDesc &Write2B32Desc = TII->get(AMDGPU::DS_WRITE2_B32); + const MCInstrDesc &Write2B64Desc = TII->get(AMDGPU::DS_WRITE2_B64); + + bool Modified = false; + + for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) { + MachineInstr &MI = *I; + + // Don't combine if volatile. + if (MI.hasOrderedMemoryRef()) { + ++I; + continue; + } + + unsigned Opc = MI.getOpcode(); + if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) { + unsigned Size = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4; + MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size); + if (Match != E) { + Modified = true; + + const MCInstrDesc &Read2Desc + = (Opc == AMDGPU::DS_READ_B64) ? Read2B64Desc : Read2B32Desc; + I = mergeRead2Pair(I, Match, Size, Read2Desc); + } else { + ++I; + } + + continue; + } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) { + unsigned Size = (Opc == AMDGPU::DS_WRITE_B64) ? 
8 : 4; + MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size); + if (Match != E) { + Modified = true; + + const MCInstrDesc &Write2Desc + = (Opc == AMDGPU::DS_WRITE_B64) ? Write2B64Desc : Write2B32Desc; + + I = mergeWrite2Pair(I, Match, Size, Write2Desc); + } else { + ++I; + } + + continue; + } + + ++I; + } + + return Modified; +} + +bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) { + const TargetSubtargetInfo *STM = MF.getTarget().getSubtargetImpl(); + TRI = static_cast(STM->getRegisterInfo()); + TII = static_cast(STM->getInstrInfo()); + MRI = &MF.getRegInfo(); + + LIS = &getAnalysis(); + + DEBUG(dbgs() << "Running SILoadStoreOptimizer\n"); + + assert(!MRI->isSSA()); + + bool Modified = false; + + for (MachineBasicBlock &MBB : MF) + Modified |= optimizeBlock(MBB); + + return Modified; +} diff --git a/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll b/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll index 67c6738a9a9..672dfabe5d1 100644 --- a/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll +++ b/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll @@ -1,5 +1,5 @@ -; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI --check-prefix=CHECK %s -; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=CI --check-prefix=CHECK %s +; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI --check-prefix=CHECK %s +; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=CI --check-prefix=CHECK %s declare i32 @llvm.r600.read.tidig.x() #0 declare void @llvm.AMDGPU.barrier.local() #1 @@ -18,10 +18,8 @@ declare void @llvm.AMDGPU.barrier.local() #1 ; SI-DAG: V_ADD_I32_e32 [[VADDR0x100:v[0-9]+]], 0x100, [[VADDR]] ; SI-DAG: DS_READ_B32 v{{[0-9]+}}, [[VADDR0x100]], 0x0 -; CI-DAG: DS_READ_B32 v{{[0-9]+}}, [[VADDR]], 0x0 -; CI-DAG: DS_READ_B32 v{{[0-9]+}}, [[VADDR]], 0x4 -; CI-DAG: DS_READ_B32 v{{[0-9]+}}, [[VADDR]], 0x80 -; CI-DAG: DS_READ_B32 v{{[0-9]+}}, [[VADDR]], 0x84 +; CI-DAG: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, [[VADDR]], 0x0, 0x1 +; CI-DAG: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, [[VADDR]], 0x20, 0x21 ; CI-DAG: DS_READ_B32 v{{[0-9]+}}, [[VADDR]], 0x100 ; CHECK: S_ENDPGM define void @signed_ds_offset_addressing_loop(float addrspace(1)* noalias nocapture %out, float addrspace(3)* noalias nocapture readonly %lptr, i32 %n) #2 { diff --git a/test/CodeGen/R600/ds_read2.ll b/test/CodeGen/R600/ds_read2.ll new file mode 100644 index 00000000000..68844250f17 --- /dev/null +++ b/test/CodeGen/R600/ds_read2.ll @@ -0,0 +1,461 @@ +; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s + +; FIXME: We don't get cases where the address was an SGPR because we +; get a copy to the address register for each one. 
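+;
+; The DS_READ2 offsets checked below are encoded in units of the element size:
+; an index delta of 8 floats appears as offset1 0x8, the largest mergeable
+; delta is 255 elements (0xff), and a delta of 256 elements falls back to two
+; separate DS_READ instructions.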
+ +@lds = addrspace(3) global [512 x float] zeroinitializer, align 4 + @lds.f64 = addrspace(3) global [512 x double] zeroinitializer, align 8 + +; SI-LABEL: @simple_read2_f32 +; SI: DS_READ2_B32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}}, 0x0, 0x8 +; SI: S_WAITCNT lgkmcnt(0) +; SI: V_ADD_F32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]] +; SI: BUFFER_STORE_DWORD [[RESULT]] +; SI: S_ENDPGM +define void @simple_read2_f32(float addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + %val0 = load float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + %val1 = load float addrspace(3)* %arrayidx1, align 4 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; SI-LABEL: @simple_read2_f32_max_offset +; SI: DS_READ2_B32 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, v{{[0-9]+}}, 0x0, 0xff +; SI: S_WAITCNT lgkmcnt(0) +; SI: V_ADD_F32_e32 [[RESULT:v[0-9]+]], v[[HI_VREG]], v[[LO_VREG]] +; SI: BUFFER_STORE_DWORD [[RESULT]] +; SI: S_ENDPGM +define void @simple_read2_f32_max_offset(float addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + %val0 = load float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 255 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + %val1 = load float addrspace(3)* %arrayidx1, align 4 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; SI-LABEL: @simple_read2_f32_too_far +; SI-NOT DS_READ2_B32 +; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0 +; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x400 +; SI: S_ENDPGM +define void @simple_read2_f32_too_far(float addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + %val0 = load float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 256 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + %val1 = load float addrspace(3)* %arrayidx1, align 4 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; SI-LABEL: @simple_read2_f32_x2 +; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR:v[0-9]+]], 0x0, 0x8 +; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR]], 0xb, 0x1b +; SI: S_ENDPGM +define void @simple_read2_f32_x2(float addrspace(1)* %out) #0 { + %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1 + %idx.0 = add nsw i32 %tid.x, 0 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0 + %val0 = load float addrspace(3)* %arrayidx0, align 4 + + %idx.1 = add nsw i32 %tid.x, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1 + %val1 = load float addrspace(3)* %arrayidx1, align 4 + %sum.0 = fadd float %val0, %val1 + + %idx.2 = add nsw i32 %tid.x, 11 + %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 
0, i32 %idx.2 + %val2 = load float addrspace(3)* %arrayidx2, align 4 + + %idx.3 = add nsw i32 %tid.x, 27 + %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3 + %val3 = load float addrspace(3)* %arrayidx3, align 4 + %sum.1 = fadd float %val2, %val3 + + %sum = fadd float %sum.0, %sum.1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %idx.0 + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; Make sure there is an instruction between the two sets of reads. +; SI-LABEL: @simple_read2_f32_x2_barrier +; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR:v[0-9]+]], 0x0, 0x8 +; SI: S_BARRIER +; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR]], 0xb, 0x1b +; SI: S_ENDPGM +define void @simple_read2_f32_x2_barrier(float addrspace(1)* %out) #0 { + %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1 + %idx.0 = add nsw i32 %tid.x, 0 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0 + %val0 = load float addrspace(3)* %arrayidx0, align 4 + + %idx.1 = add nsw i32 %tid.x, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1 + %val1 = load float addrspace(3)* %arrayidx1, align 4 + %sum.0 = fadd float %val0, %val1 + + call void @llvm.AMDGPU.barrier.local() #2 + + %idx.2 = add nsw i32 %tid.x, 11 + %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2 + %val2 = load float addrspace(3)* %arrayidx2, align 4 + + %idx.3 = add nsw i32 %tid.x, 27 + %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3 + %val3 = load float addrspace(3)* %arrayidx3, align 4 + %sum.1 = fadd float %val2, %val3 + + %sum = fadd float %sum.0, %sum.1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %idx.0 + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; For some reason adding something to the base address for the first +; element results in only folding the inner pair. + +; SI-LABEL: @simple_read2_f32_x2_nonzero_base +; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR:v[0-9]+]], 0x2, 0x8 +; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, [[BASEADDR]], 0xb, 0x1b +; SI: S_ENDPGM +define void @simple_read2_f32_x2_nonzero_base(float addrspace(1)* %out) #0 { + %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1 + %idx.0 = add nsw i32 %tid.x, 2 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0 + %val0 = load float addrspace(3)* %arrayidx0, align 4 + + %idx.1 = add nsw i32 %tid.x, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1 + %val1 = load float addrspace(3)* %arrayidx1, align 4 + %sum.0 = fadd float %val0, %val1 + + %idx.2 = add nsw i32 %tid.x, 11 + %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2 + %val2 = load float addrspace(3)* %arrayidx2, align 4 + + %idx.3 = add nsw i32 %tid.x, 27 + %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3 + %val3 = load float addrspace(3)* %arrayidx3, align 4 + %sum.1 = fadd float %val2, %val3 + + %sum = fadd float %sum.0, %sum.1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %idx.0 + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; Be careful of vectors of pointers. We don't know if the 2 pointers +; in the vectors are really the same base, so this is not safe to +; merge. 
+; Base pointers come from different subregister of same super +; register. We can't safely merge this. + +; SI-LABEL: @read2_ptr_is_subreg_arg_f32 +; SI-NOT: DS_READ2_B32 +; SI: DS_READ_B32 +; SI: DS_READ_B32 +; SI: S_ENDPGM +define void @read2_ptr_is_subreg_arg_f32(float addrspace(1)* %out, <2 x float addrspace(3)*> %lds.ptr) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0 + %index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0 + %gep = getelementptr inbounds <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1 + %gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0 + %gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1 + %val0 = load float addrspace(3)* %gep.0, align 4 + %val1 = load float addrspace(3)* %gep.1, align 4 + %add.x = add nsw i32 %x.i, 8 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; Apply a constant scalar offset after the pointer vector extract. We +; are rejecting merges that have the same, constant 0 offset, so make +; sure we are really rejecting it because of the different +; subregisters. + +; SI-LABEL: @read2_ptr_is_subreg_arg_offset_f32 +; SI-NOT: DS_READ2_B32 +; SI: DS_READ_B32 +; SI: DS_READ_B32 +; SI: S_ENDPGM +define void @read2_ptr_is_subreg_arg_offset_f32(float addrspace(1)* %out, <2 x float addrspace(3)*> %lds.ptr) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0 + %index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0 + %gep = getelementptr inbounds <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1 + %gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0 + %gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1 + + ; Apply an additional offset after the vector that will be more obviously folded. + %gep.1.offset = getelementptr float addrspace(3)* %gep.1, i32 8 + + %val0 = load float addrspace(3)* %gep.0, align 4 + %val1 = load float addrspace(3)* %gep.1.offset, align 4 + %add.x = add nsw i32 %x.i, 8 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; We should be able to merge in this case, but probably not worth the effort. 
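+; (findMatchingDSInst only merges when the two address operands use the same
+; register and the same subregister index, so addresses living in different
+; subregisters of one super-register are rejected.)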
+; SI-NOT: DS_READ2_B32 +; SI: DS_READ_B32 +; SI: DS_READ_B32 +; SI: S_ENDPGM +define void @read2_ptr_is_subreg_f32(float addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %ptr.0 = insertelement <2 x [512 x float] addrspace(3)*> undef, [512 x float] addrspace(3)* @lds, i32 0 + %ptr.1 = insertelement <2 x [512 x float] addrspace(3)*> %ptr.0, [512 x float] addrspace(3)* @lds, i32 1 + %x.i.v.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0 + %x.i.v.1 = insertelement <2 x i32> %x.i.v.0, i32 %x.i, i32 1 + %idx = add <2 x i32> %x.i.v.1, + %gep = getelementptr inbounds <2 x [512 x float] addrspace(3)*> %ptr.1, <2 x i32> , <2 x i32> %idx + %gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0 + %gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1 + %val0 = load float addrspace(3)* %gep.0, align 4 + %val1 = load float addrspace(3)* %gep.1, align 4 + %add.x = add nsw i32 %x.i, 8 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; SI-LABEL: @simple_read2_f32_volatile_0 +; SI-NOT DS_READ2_B32 +; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0 +; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x20 +; SI: S_ENDPGM +define void @simple_read2_f32_volatile_0(float addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + %val0 = load volatile float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + %val1 = load float addrspace(3)* %arrayidx1, align 4 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; SI-LABEL: @simple_read2_f32_volatile_1 +; SI-NOT DS_READ2_B32 +; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0 +; SI: DS_READ_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x20 +; SI: S_ENDPGM +define void @simple_read2_f32_volatile_1(float addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + %val0 = load float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + %val1 = load volatile float addrspace(3)* %arrayidx1, align 4 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; Can't fold since not correctly aligned. +; XXX: This isn't really testing anything useful now. I think CI +; allows unaligned LDS accesses, which would be a problem here. 
+; SI-LABEL: @unaligned_read2_f32 +; SI-NOT: DS_READ2_B32 +; SI: S_ENDPGM +define void @unaligned_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %x.i + %val0 = load float addrspace(3)* %arrayidx0, align 1 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x + %val1 = load float addrspace(3)* %arrayidx1, align 1 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; SI-LABEL: @misaligned_2_simple_read2_f32 +; SI-NOT: DS_READ2_B32 +; SI: S_ENDPGM +define void @misaligned_2_simple_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %x.i + %val0 = load float addrspace(3)* %arrayidx0, align 2 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x + %val1 = load float addrspace(3)* %arrayidx1, align 2 + %sum = fadd float %val0, %val1 + %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i + store float %sum, float addrspace(1)* %out.gep, align 4 + ret void +} + +; SI-LABEL: @simple_read2_f64 +; SI: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 3, {{v[0-9]+}} +; SI: DS_READ2_B64 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}}, [[VPTR]], 0x0, 0x8 +; SI: V_ADD_F64 [[RESULT:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO_VREG]]:{{[0-9]+\]}}, v{{\[[0-9]+}}:[[HI_VREG]]{{\]}} +; SI: BUFFER_STORE_DWORDX2 [[RESULT]] +; SI: S_ENDPGM +define void @simple_read2_f64(double addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i + %val0 = load double addrspace(3)* %arrayidx0, align 8 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x + %val1 = load double addrspace(3)* %arrayidx1, align 8 + %sum = fadd double %val0, %val1 + %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i + store double %sum, double addrspace(1)* %out.gep, align 8 + ret void +} + +; SI-LABEL: @simple_read2_f64_max_offset +; SI: DS_READ2_B64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 0x0, 0xff +; SI: S_ENDPGM +define void @simple_read2_f64_max_offset(double addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i + %val0 = load double addrspace(3)* %arrayidx0, align 8 + %add.x = add nsw i32 %x.i, 255 + %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x + %val1 = load double addrspace(3)* %arrayidx1, align 8 + %sum = fadd double %val0, %val1 + %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i + store double %sum, double addrspace(1)* %out.gep, align 8 + ret void +} + +; SI-LABEL: @simple_read2_f64_too_far +; SI-NOT DS_READ2_B64 +; SI: DS_READ_B64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 0x0 +; SI: DS_READ_B64 {{v\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 0x800 +; SI: S_ENDPGM +define void @simple_read2_f64_too_far(double addrspace(1)* %out) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i + %val0 
= load double addrspace(3)* %arrayidx0, align 8 + %add.x = add nsw i32 %x.i, 256 + %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x + %val1 = load double addrspace(3)* %arrayidx1, align 8 + %sum = fadd double %val0, %val1 + %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i + store double %sum, double addrspace(1)* %out.gep, align 8 + ret void +} + +; Alignment only 4 +; SI-LABEL: @misaligned_read2_f64 +; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, 0x0, 0x1 +; SI: DS_READ2_B32 v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, 0xe, 0xf +; SI: S_ENDPGM +define void @misaligned_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i + %val0 = load double addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 7 + %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x + %val1 = load double addrspace(3)* %arrayidx1, align 4 + %sum = fadd double %val0, %val1 + %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i + store double %sum, double addrspace(1)* %out.gep, align 4 + ret void +} + +@sgemm.lA = internal unnamed_addr addrspace(3) global [264 x float] zeroinitializer, align 4 +@sgemm.lB = internal unnamed_addr addrspace(3) global [776 x float] zeroinitializer, align 4 + +define void @sgemm_inner_loop_read2_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb) #0 { + %x.i = tail call i32 @llvm.r600.read.tgid.x() #1 + %y.i = tail call i32 @llvm.r600.read.tidig.y() #1 + %arrayidx44 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i + %tmp16 = load float addrspace(3)* %arrayidx44, align 4 + %add47 = add nsw i32 %x.i, 1 + %arrayidx48 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add47 + %tmp17 = load float addrspace(3)* %arrayidx48, align 4 + %add51 = add nsw i32 %x.i, 16 + %arrayidx52 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add51 + %tmp18 = load float addrspace(3)* %arrayidx52, align 4 + %add55 = add nsw i32 %x.i, 17 + %arrayidx56 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add55 + %tmp19 = load float addrspace(3)* %arrayidx56, align 4 + %arrayidx60 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %y.i + %tmp20 = load float addrspace(3)* %arrayidx60, align 4 + %add63 = add nsw i32 %y.i, 1 + %arrayidx64 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add63 + %tmp21 = load float addrspace(3)* %arrayidx64, align 4 + %add67 = add nsw i32 %y.i, 32 + %arrayidx68 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add67 + %tmp22 = load float addrspace(3)* %arrayidx68, align 4 + %add71 = add nsw i32 %y.i, 33 + %arrayidx72 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add71 + %tmp23 = load float addrspace(3)* %arrayidx72, align 4 + %add75 = add nsw i32 %y.i, 64 + %arrayidx76 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add75 + %tmp24 = load float addrspace(3)* %arrayidx76, align 4 + %add79 = add nsw i32 %y.i, 65 + %arrayidx80 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add79 + %tmp25 = load float addrspace(3)* %arrayidx80, align 4 + %sum.0 = fadd float %tmp16, %tmp17 + %sum.1 = fadd float %sum.0, %tmp18 + %sum.2 = fadd float %sum.1, %tmp19 + %sum.3 = fadd float %sum.2, %tmp20 
+ %sum.4 = fadd float %sum.3, %tmp21 + %sum.5 = fadd float %sum.4, %tmp22 + %sum.6 = fadd float %sum.5, %tmp23 + %sum.7 = fadd float %sum.6, %tmp24 + %sum.8 = fadd float %sum.7, %tmp25 + store float %sum.8, float addrspace(1)* %C, align 4 + ret void +} + +define void @misaligned_read2_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(3)* %in) #0 { + %load = load <2 x i32> addrspace(3)* %in, align 4 + store <2 x i32> %load, <2 x i32> addrspace(1)* %out, align 8 + ret void +} + +define void @misaligned_read2_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %in) #0 { + %load = load i64 addrspace(3)* %in, align 4 + store i64 %load, i64 addrspace(1)* %out, align 8 + ret void +} + +; Function Attrs: nounwind readnone +declare i32 @llvm.r600.read.tgid.x() #1 + +; Function Attrs: nounwind readnone +declare i32 @llvm.r600.read.tgid.y() #1 + +; Function Attrs: nounwind readnone +declare i32 @llvm.r600.read.tidig.x() #1 + +; Function Attrs: nounwind readnone +declare i32 @llvm.r600.read.tidig.y() #1 + +; Function Attrs: noduplicate nounwind +declare void @llvm.AMDGPU.barrier.local() #2 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readnone } +attributes #2 = { noduplicate nounwind } diff --git a/test/CodeGen/R600/ds_write2.ll b/test/CodeGen/R600/ds_write2.ll new file mode 100644 index 00000000000..3a3c8368682 --- /dev/null +++ b/test/CodeGen/R600/ds_write2.ll @@ -0,0 +1,378 @@ +; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s + +@lds = addrspace(3) global [512 x float] zeroinitializer, align 4 +@lds.f64 = addrspace(3) global [512 x double] zeroinitializer, align 8 + + +; SI-LABEL: @simple_write2_one_val_f32 +; SI-DAG: BUFFER_LOAD_DWORD [[VAL:v[0-9]+]] +; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}} +; SI: DS_WRITE2_B32 [[VPTR]], [[VAL]], [[VAL]], 0x0, 0x8 [M0] +; SI: S_ENDPGM +define void @simple_write2_one_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep = getelementptr float addrspace(1)* %in, i32 %x.i + %val = load float addrspace(1)* %in.gep, align 4 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store float %val, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store float %val, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_f32 +; SI-DAG: BUFFER_LOAD_DWORD [[VAL0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} +; SI-DAG: BUFFER_LOAD_DWORD [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4 +; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}} +; SI: DS_WRITE2_B32 [[VPTR]], [[VAL0]], [[VAL1]], 0x0, 0x8 [M0] +; SI: S_ENDPGM +define void @simple_write2_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %x.i + %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1 + %val0 = load float addrspace(1)* %in.gep.0, align 4 + %val1 = load float addrspace(1)* %in.gep.1, align 4 + %arrayidx0 = getelementptr 
inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store float %val0, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store float %val1, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_f32_volatile_0 +; SI-NOT: DS_WRITE2_B32 +; SI: DS_WRITE_B32 {{v[0-9]+}}, {{v[0-9]+}}, 0x0 +; SI: DS_WRITE_B32 {{v[0-9]+}}, {{v[0-9]+}}, 0x20 +; SI: S_ENDPGM +define void @simple_write2_two_val_f32_volatile_0(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in0.gep = getelementptr float addrspace(1)* %in0, i32 %x.i + %in1.gep = getelementptr float addrspace(1)* %in1, i32 %x.i + %val0 = load float addrspace(1)* %in0.gep, align 4 + %val1 = load float addrspace(1)* %in1.gep, align 4 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store volatile float %val0, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store float %val1, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_f32_volatile_1 +; SI-NOT: DS_WRITE2_B32 +; SI: DS_WRITE_B32 {{v[0-9]+}}, {{v[0-9]+}}, 0x0 +; SI: DS_WRITE_B32 {{v[0-9]+}}, {{v[0-9]+}}, 0x20 +; SI: S_ENDPGM +define void @simple_write2_two_val_f32_volatile_1(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in0.gep = getelementptr float addrspace(1)* %in0, i32 %x.i + %in1.gep = getelementptr float addrspace(1)* %in1, i32 %x.i + %val0 = load float addrspace(1)* %in0.gep, align 4 + %val1 = load float addrspace(1)* %in1.gep, align 4 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store float %val0, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store volatile float %val1, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; 2 data subregisters from different super registers. 
+; SI-LABEL: @simple_write2_two_val_subreg2_mixed_f32 +; SI: BUFFER_LOAD_DWORDX2 v{{\[}}[[VAL0:[0-9]+]]:{{[0-9]+\]}} +; SI: BUFFER_LOAD_DWORDX2 v{{\[[0-9]+}}:[[VAL1:[0-9]+]]{{\]}} +; SI: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}} +; SI: DS_WRITE2_B32 [[VPTR]], v[[VAL0]], v[[VAL1]], 0x0, 0x8 [M0] +; SI: S_ENDPGM +define void @simple_write2_two_val_subreg2_mixed_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep.0 = getelementptr <2 x float> addrspace(1)* %in, i32 %x.i + %in.gep.1 = getelementptr <2 x float> addrspace(1)* %in.gep.0, i32 1 + %val0 = load <2 x float> addrspace(1)* %in.gep.0, align 8 + %val1 = load <2 x float> addrspace(1)* %in.gep.1, align 8 + %val0.0 = extractelement <2 x float> %val0, i32 0 + %val1.1 = extractelement <2 x float> %val1, i32 1 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store float %val0.0, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store float %val1.1, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_subreg2_f32 +; SI-DAG: BUFFER_LOAD_DWORDX2 v{{\[}}[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]{{\]}} +; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}} +; SI: DS_WRITE2_B32 [[VPTR]], v[[VAL0]], v[[VAL1]], 0x0, 0x8 [M0] +; SI: S_ENDPGM +define void @simple_write2_two_val_subreg2_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep = getelementptr <2 x float> addrspace(1)* %in, i32 %x.i + %val = load <2 x float> addrspace(1)* %in.gep, align 8 + %val0 = extractelement <2 x float> %val, i32 0 + %val1 = extractelement <2 x float> %val, i32 1 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store float %val0, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store float %val1, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_subreg4_f32 +; SI-DAG: BUFFER_LOAD_DWORDX4 v{{\[}}[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]{{\]}} +; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}} +; SI: DS_WRITE2_B32 [[VPTR]], v[[VAL0]], v[[VAL1]], 0x0, 0x8 [M0] +; SI: S_ENDPGM +define void @simple_write2_two_val_subreg4_f32(float addrspace(1)* %C, <4 x float> addrspace(1)* %in) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep = getelementptr <4 x float> addrspace(1)* %in, i32 %x.i + %val = load <4 x float> addrspace(1)* %in.gep, align 16 + %val0 = extractelement <4 x float> %val, i32 0 + %val1 = extractelement <4 x float> %val, i32 3 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store float %val0, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store float %val1, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_max_offset_f32 +; SI-DAG: BUFFER_LOAD_DWORD [[VAL0:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} +; SI-DAG: BUFFER_LOAD_DWORD [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x4 +; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}} +; SI: DS_WRITE2_B32 
[[VPTR]], [[VAL0]], [[VAL1]], 0x0, 0xff [M0] +; SI: S_ENDPGM +define void @simple_write2_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %x.i + %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1 + %val0 = load float addrspace(1)* %in.gep.0, align 4 + %val1 = load float addrspace(1)* %in.gep.1, align 4 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store float %val0, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 255 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store float %val1, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_too_far_f32 +; SI: DS_WRITE_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x0 +; SI: DS_WRITE_B32 v{{[0-9]+}}, v{{[0-9]+}}, 0x400 +; SI: S_ENDPGM +define void @simple_write2_two_val_too_far_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in0.gep = getelementptr float addrspace(1)* %in0, i32 %x.i + %in1.gep = getelementptr float addrspace(1)* %in1, i32 %x.i + %val0 = load float addrspace(1)* %in0.gep, align 4 + %val1 = load float addrspace(1)* %in1.gep, align 4 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i + store float %val0, float addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 256 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x + store float %val1, float addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_f32_x2 +; SI: DS_WRITE2_B32 [[BASEADDR:v[0-9]+]], [[VAL0:v[0-9]+]], [[VAL1:v[0-9]+]], 0x0, 0x8 +; SI-NEXT: DS_WRITE2_B32 [[BASEADDR]], [[VAL0]], [[VAL1]], 0xb, 0x1b +; SI: S_ENDPGM +define void @simple_write2_two_val_f32_x2(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 { + %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1 + %in0.gep = getelementptr float addrspace(1)* %in0, i32 %tid.x + %in1.gep = getelementptr float addrspace(1)* %in1, i32 %tid.x + %val0 = load float addrspace(1)* %in0.gep, align 4 + %val1 = load float addrspace(1)* %in1.gep, align 4 + + %idx.0 = add nsw i32 %tid.x, 0 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0 + store float %val0, float addrspace(3)* %arrayidx0, align 4 + + %idx.1 = add nsw i32 %tid.x, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1 + store float %val1, float addrspace(3)* %arrayidx1, align 4 + + %idx.2 = add nsw i32 %tid.x, 11 + %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2 + store float %val0, float addrspace(3)* %arrayidx2, align 4 + + %idx.3 = add nsw i32 %tid.x, 27 + %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3 + store float %val1, float addrspace(3)* %arrayidx3, align 4 + + ret void +} + +; SI-LABEL: @simple_write2_two_val_f32_x2_nonzero_base +; SI: DS_WRITE2_B32 [[BASEADDR:v[0-9]+]], [[VAL0:v[0-9]+]], [[VAL1:v[0-9]+]], 0x3, 0x8 +; SI-NEXT: DS_WRITE2_B32 [[BASEADDR]], [[VAL0]], [[VAL1]], 0xb, 0x1b +; SI: S_ENDPGM +define void @simple_write2_two_val_f32_x2_nonzero_base(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 { + %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1 + %in0.gep = 
getelementptr float addrspace(1)* %in0, i32 %tid.x + %in1.gep = getelementptr float addrspace(1)* %in1, i32 %tid.x + %val0 = load float addrspace(1)* %in0.gep, align 4 + %val1 = load float addrspace(1)* %in1.gep, align 4 + + %idx.0 = add nsw i32 %tid.x, 3 + %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0 + store float %val0, float addrspace(3)* %arrayidx0, align 4 + + %idx.1 = add nsw i32 %tid.x, 8 + %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1 + store float %val1, float addrspace(3)* %arrayidx1, align 4 + + %idx.2 = add nsw i32 %tid.x, 11 + %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2 + store float %val0, float addrspace(3)* %arrayidx2, align 4 + + %idx.3 = add nsw i32 %tid.x, 27 + %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3 + store float %val1, float addrspace(3)* %arrayidx3, align 4 + + ret void +} + +; SI-LABEL: @write2_ptr_subreg_arg_two_val_f32 +; SI-NOT: DS_WRITE2_B32 +; SI: DS_WRITE_B32 +; SI: DS_WRITE_B32 +; SI: S_ENDPGM +define void @write2_ptr_subreg_arg_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1, <2 x float addrspace(3)*> %lds.ptr) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in0.gep = getelementptr float addrspace(1)* %in0, i32 %x.i + %in1.gep = getelementptr float addrspace(1)* %in1, i32 %x.i + %val0 = load float addrspace(1)* %in0.gep, align 4 + %val1 = load float addrspace(1)* %in1.gep, align 4 + + %index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0 + %index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0 + %gep = getelementptr inbounds <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1 + %gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0 + %gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1 + + ; Apply an additional offset after the vector that will be more obviously folded. 
+ %gep.1.offset = getelementptr float addrspace(3)* %gep.1, i32 8 + store float %val0, float addrspace(3)* %gep.0, align 4 + + %add.x = add nsw i32 %x.i, 8 + store float %val1, float addrspace(3)* %gep.1.offset, align 4 + ret void +} + +; SI-LABEL: @simple_write2_one_val_f64 +; SI: BUFFER_LOAD_DWORDX2 [[VAL:v\[[0-9]+:[0-9]+\]]], +; SI: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}} +; SI: DS_WRITE2_B64 [[VPTR]], [[VAL]], [[VAL]], 0x0, 0x8 [M0] +; SI: S_ENDPGM +define void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep = getelementptr double addrspace(1)* %in, i32 %x.i + %val = load double addrspace(1)* %in.gep, align 8 + %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i + store double %val, double addrspace(3)* %arrayidx0, align 8 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x + store double %val, double addrspace(3)* %arrayidx1, align 8 + ret void +} + +; SI-LABEL: @misaligned_simple_write2_one_val_f64 +; SI-DAG: BUFFER_LOAD_DWORDX2 v{{\[}}[[VAL0:[0-9]+]]:[[VAL1:[0-9]+]]{{\]}} +; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}} +; SI: DS_WRITE2_B32 [[VPTR]], v[[VAL0]], v[[VAL1]], 0x0, 0x1 [M0] +; SI: DS_WRITE2_B32 [[VPTR]], v[[VAL0]], v[[VAL1]], 0xe, 0xf [M0] +; SI: S_ENDPGM +define void @misaligned_simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep = getelementptr double addrspace(1)* %in, i32 %x.i + %val = load double addrspace(1)* %in.gep, align 8 + %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i + store double %val, double addrspace(3)* %arrayidx0, align 4 + %add.x = add nsw i32 %x.i, 7 + %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x + store double %val, double addrspace(3)* %arrayidx1, align 4 + ret void +} + +; SI-LABEL: @simple_write2_two_val_f64 +; SI-DAG: BUFFER_LOAD_DWORDX2 [[VAL0:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} +; SI-DAG: BUFFER_LOAD_DWORDX2 [[VAL1:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:0x8 +; SI-DAG: V_LSHLREV_B32_e32 [[VPTR:v[0-9]+]], 3, v{{[0-9]+}} +; SI: DS_WRITE2_B64 [[VPTR]], [[VAL0]], [[VAL1]], 0x0, 0x8 [M0] +; SI: S_ENDPGM +define void @simple_write2_two_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 { + %x.i = tail call i32 @llvm.r600.read.tidig.x() #1 + %in.gep.0 = getelementptr double addrspace(1)* %in, i32 %x.i + %in.gep.1 = getelementptr double addrspace(1)* %in.gep.0, i32 1 + %val0 = load double addrspace(1)* %in.gep.0, align 8 + %val1 = load double addrspace(1)* %in.gep.1, align 8 + %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i + store double %val0, double addrspace(3)* %arrayidx0, align 8 + %add.x = add nsw i32 %x.i, 8 + %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x + store double %val1, double addrspace(3)* %arrayidx1, align 8 + ret void +} + +@sgemm.lA = internal unnamed_addr addrspace(3) global [264 x float] zeroinitializer, align 4 +@sgemm.lB = internal unnamed_addr addrspace(3) global [776 x float] zeroinitializer, align 4 + +define void @write2_sgemm_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb, float addrspace(1)* %in) #0 { + %x.i = tail call i32 
@llvm.r600.read.tgid.x() #1 + %y.i = tail call i32 @llvm.r600.read.tidig.y() #1 + %val = load float addrspace(1)* %in + %arrayidx44 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i + store float %val, float addrspace(3)* %arrayidx44, align 4 + %add47 = add nsw i32 %x.i, 1 + %arrayidx48 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add47 + store float %val, float addrspace(3)* %arrayidx48, align 4 + %add51 = add nsw i32 %x.i, 16 + %arrayidx52 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add51 + store float %val, float addrspace(3)* %arrayidx52, align 4 + %add55 = add nsw i32 %x.i, 17 + %arrayidx56 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add55 + store float %val, float addrspace(3)* %arrayidx56, align 4 + %arrayidx60 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %y.i + store float %val, float addrspace(3)* %arrayidx60, align 4 + %add63 = add nsw i32 %y.i, 1 + %arrayidx64 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add63 + store float %val, float addrspace(3)* %arrayidx64, align 4 + %add67 = add nsw i32 %y.i, 32 + %arrayidx68 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add67 + store float %val, float addrspace(3)* %arrayidx68, align 4 + %add71 = add nsw i32 %y.i, 33 + %arrayidx72 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add71 + store float %val, float addrspace(3)* %arrayidx72, align 4 + %add75 = add nsw i32 %y.i, 64 + %arrayidx76 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add75 + store float %val, float addrspace(3)* %arrayidx76, align 4 + %add79 = add nsw i32 %y.i, 65 + %arrayidx80 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add79 + store float %val, float addrspace(3)* %arrayidx80, align 4 + ret void +} + +; Function Attrs: nounwind readnone +declare i32 @llvm.r600.read.tgid.x() #1 + +; Function Attrs: nounwind readnone +declare i32 @llvm.r600.read.tgid.y() #1 + +; Function Attrs: nounwind readnone +declare i32 @llvm.r600.read.tidig.x() #1 + +; Function Attrs: nounwind readnone +declare i32 @llvm.r600.read.tidig.y() #1 + +; Function Attrs: noduplicate nounwind +declare void @llvm.AMDGPU.barrier.local() #2 + +attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind readnone } +attributes #2 = { noduplicate nounwind } -- 2.34.1