From: Marek Olsak Date: Wed, 25 Nov 2015 21:22:45 +0000 (+0000) Subject: AMDGPU/SI: select S_ABS_I32 when possible (v2) X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;ds=inline;h=73f0848ca2cf7676d72e8cd6f74f4896d5007756;p=oota-llvm.git AMDGPU/SI: select S_ABS_I32 when possible (v2) v2: added more tests, moved the SALU->VALU conversion to a separate function It looks like it's not possible to get subregisters in the S_ABS lowering code, and I don't feel like guessing without testing what the correct code would look like. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@254095 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp index adf114e02d6..9a85a1d515f 100644 --- a/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -2412,6 +2412,11 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const { } break; + case AMDGPU::S_ABS_I32: + lowerScalarAbs(Worklist, Inst); + Inst->eraseFromParent(); + continue; + case AMDGPU::S_BFE_U64: case AMDGPU::S_BFM_B64: llvm_unreachable("Moving this op to VALU not implemented"); @@ -2497,6 +2502,30 @@ const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const { return &AMDGPU::VGPR_32RegClass; } +void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist, + MachineInstr *Inst) const { + MachineBasicBlock &MBB = *Inst->getParent(); + MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); + MachineBasicBlock::iterator MII = Inst; + DebugLoc DL = Inst->getDebugLoc(); + + MachineOperand &Dest = Inst->getOperand(0); + MachineOperand &Src = Inst->getOperand(1); + unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); + + BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg) + .addImm(0) + .addReg(Src.getReg()); + + BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) + .addReg(Src.getReg()) + 
.addReg(TmpReg); + + MRI.replaceRegWith(Dest.getReg(), ResultReg); + addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); +} + void SIInstrInfo::splitScalar64BitUnaryOp( SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr *Inst, diff --git a/lib/Target/AMDGPU/SIInstrInfo.h b/lib/Target/AMDGPU/SIInstrInfo.h index 7fcf723af22..2bce87f3bd0 100644 --- a/lib/Target/AMDGPU/SIInstrInfo.h +++ b/lib/Target/AMDGPU/SIInstrInfo.h @@ -41,6 +41,9 @@ private: void swapOperands(MachineBasicBlock::iterator Inst) const; + void lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist, + MachineInstr *Inst) const; + void splitScalar64BitUnaryOp(SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr *Inst, unsigned Opcode) const; diff --git a/lib/Target/AMDGPU/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td index 98d74a217ac..bc2b0c6c07f 100644 --- a/lib/Target/AMDGPU/SIInstructions.td +++ b/lib/Target/AMDGPU/SIInstructions.td @@ -2174,6 +2174,11 @@ def : Pat < (S_MOV_B32 0), sub1)) >; +def : Pat < + (i32 (smax i32:$x, (i32 (ineg i32:$x)))), + (S_ABS_I32 $x) +>; + //===----------------------------------------------------------------------===// // SOP2 Patterns //===----------------------------------------------------------------------===// diff --git a/test/CodeGen/AMDGPU/llvm.AMDGPU.abs.ll b/test/CodeGen/AMDGPU/llvm.AMDGPU.abs.ll index 8bf094b8bc7..ca8ddbae9fb 100644 --- a/test/CodeGen/AMDGPU/llvm.AMDGPU.abs.ll +++ b/test/CodeGen/AMDGPU/llvm.AMDGPU.abs.ll @@ -8,9 +8,7 @@ declare i32 @llvm.AMDGPU.abs(i32) nounwind readnone declare i32 @llvm.AMDIL.abs.i32(i32) nounwind readnone ; FUNC-LABEL: {{^}}s_abs_i32: -; SI: s_sub_i32 -; SI: s_max_i32 -; SI: s_endpgm +; SI: s_abs_i32 ; EG: SUB_INT ; EG: MAX_INT diff --git a/test/CodeGen/AMDGPU/sminmax.ll b/test/CodeGen/AMDGPU/sminmax.ll new file mode 100644 index 00000000000..3dd49aafa4d --- /dev/null +++ b/test/CodeGen/AMDGPU/sminmax.ll @@ -0,0 +1,156 @@ +; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s +; RUN: 
llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s + +; FUNC-LABEL: {{^}}s_abs_i32: +; GCN: s_abs_i32 +; GCN: s_add_i32 +define void @s_abs_i32(i32 addrspace(1)* %out, i32 %val) nounwind { + %neg = sub i32 0, %val + %cond = icmp sgt i32 %val, %neg + %res = select i1 %cond, i32 %val, i32 %neg + %res2 = add i32 %res, 2 + store i32 %res2, i32 addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}v_abs_i32: +; GCN: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SRC:v[0-9]+]] +; GCN: v_max_i32_e32 {{v[0-9]+}}, [[NEG]], [[SRC]] +; GCN: v_add_i32 +define void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind { + %val = load i32, i32 addrspace(1)* %src, align 4 + %neg = sub i32 0, %val + %cond = icmp sgt i32 %val, %neg + %res = select i1 %cond, i32 %val, i32 %neg + %res2 = add i32 %res, 2 + store i32 %res2, i32 addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}s_abs_v2i32: +; TODO: this should use s_abs_i32 +; GCNX: s_abs_i32 +; GCNX: s_abs_i32 +; GCN: s_sub +; GCN: s_sub +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cndmask_b32 +; GCN-DAG: v_cndmask_b32 +; GCN: v_add_i32 +; GCN: v_add_i32 +define void @s_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %val) nounwind { + %z0 = insertelement <2 x i32> undef, i32 0, i32 0 + %z1 = insertelement <2 x i32> %z0, i32 0, i32 1 + %t0 = insertelement <2 x i32> undef, i32 2, i32 0 + %t1 = insertelement <2 x i32> %t0, i32 2, i32 1 + %neg = sub <2 x i32> %z1, %val + %cond = icmp sgt <2 x i32> %val, %neg + %res = select <2 x i1> %cond, <2 x i32> %val, <2 x i32> %neg + %res2 = add <2 x i32> %res, %t1 + store <2 x i32> %res2, <2 x i32> addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}v_abs_v2i32: +; GCN: v_sub_i32_e32 [[NEG0:v[0-9]+]], vcc, 0, [[SRC0:v[0-9]+]] +; GCN: v_sub_i32_e32 [[NEG1:v[0-9]+]], vcc, 0, [[SRC1:v[0-9]+]] +; TODO: this should use v_max_i32 +; GCNX: v_max_i32_e32 {{v[0-9]+}}, [[NEG0]], [[SRC0]] +; 
GCNX: v_max_i32_e32 {{v[0-9]+}}, [[NEG1]], [[SRC1]] +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cndmask_b32 +; GCN-DAG: v_cndmask_b32 +; GCN: v_add_i32 +; GCN: v_add_i32 +define void @v_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %src) nounwind { + %z0 = insertelement <2 x i32> undef, i32 0, i32 0 + %z1 = insertelement <2 x i32> %z0, i32 0, i32 1 + %t0 = insertelement <2 x i32> undef, i32 2, i32 0 + %t1 = insertelement <2 x i32> %t0, i32 2, i32 1 + %val = load <2 x i32>, <2 x i32> addrspace(1)* %src, align 4 + %neg = sub <2 x i32> %z1, %val + %cond = icmp sgt <2 x i32> %val, %neg + %res = select <2 x i1> %cond, <2 x i32> %val, <2 x i32> %neg + %res2 = add <2 x i32> %res, %t1 + store <2 x i32> %res2, <2 x i32> addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}s_abs_v4i32: +; TODO: this should use s_abs_i32 +; GCNX: s_abs_i32 +; GCNX: s_abs_i32 +; GCNX: s_abs_i32 +; GCNX: s_abs_i32 +; GCN: s_sub +; GCN: s_sub +; GCN: s_sub +; GCN: s_sub +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cndmask_b32 +; GCN-DAG: v_cndmask_b32 +; GCN-DAG: v_cndmask_b32 +; GCN-DAG: v_cndmask_b32 +; GCN: v_add_i32 +; GCN: v_add_i32 +define void @s_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %val) nounwind { + %z0 = insertelement <4 x i32> undef, i32 0, i32 0 + %z1 = insertelement <4 x i32> %z0, i32 0, i32 1 + %z2 = insertelement <4 x i32> %z1, i32 0, i32 2 + %z3 = insertelement <4 x i32> %z2, i32 0, i32 3 + %t0 = insertelement <4 x i32> undef, i32 2, i32 0 + %t1 = insertelement <4 x i32> %t0, i32 2, i32 1 + %t2 = insertelement <4 x i32> %t1, i32 2, i32 2 + %t3 = insertelement <4 x i32> %t2, i32 2, i32 3 + %neg = sub <4 x i32> %z3, %val + %cond = icmp sgt <4 x i32> %val, %neg + %res = select <4 x i1> %cond, <4 x i32> %val, <4 x i32> %neg + %res2 = add <4 x i32> %res, %t3 + store <4 x i32> %res2, <4 x i32> addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}v_abs_v4i32: +; GCN: 
v_sub_i32_e32 [[NEG0:v[0-9]+]], vcc, 0, [[SRC0:v[0-9]+]] +; GCN: v_sub_i32_e32 [[NEG1:v[0-9]+]], vcc, 0, [[SRC1:v[0-9]+]] +; GCN: v_sub_i32_e32 [[NEG2:v[0-9]+]], vcc, 0, [[SRC2:v[0-9]+]] +; GCN: v_sub_i32_e32 [[NEG3:v[0-9]+]], vcc, 0, [[SRC3:v[0-9]+]] +; TODO: this should use v_max_i32 +; GCNX: v_max_i32_e32 {{v[0-9]+}}, [[NEG0]], [[SRC0]] +; GCNX: v_max_i32_e32 {{v[0-9]+}}, [[NEG1]], [[SRC1]] +; GCNX: v_max_i32_e32 {{v[0-9]+}}, [[NEG2]], [[SRC2]] +; GCNX: v_max_i32_e32 {{v[0-9]+}}, [[NEG3]], [[SRC3]] +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cmp_gt +; GCN-DAG: v_cndmask_b32 +; GCN-DAG: v_cndmask_b32 +; GCN-DAG: v_cndmask_b32 +; GCN-DAG: v_cndmask_b32 +; GCN: v_add_i32 +; GCN: v_add_i32 +; GCN: v_add_i32 +; GCN: v_add_i32 +define void @v_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %src) nounwind { + %z0 = insertelement <4 x i32> undef, i32 0, i32 0 + %z1 = insertelement <4 x i32> %z0, i32 0, i32 1 + %z2 = insertelement <4 x i32> %z1, i32 0, i32 2 + %z3 = insertelement <4 x i32> %z2, i32 0, i32 3 + %t0 = insertelement <4 x i32> undef, i32 2, i32 0 + %t1 = insertelement <4 x i32> %t0, i32 2, i32 1 + %t2 = insertelement <4 x i32> %t1, i32 2, i32 2 + %t3 = insertelement <4 x i32> %t2, i32 2, i32 3 + %val = load <4 x i32>, <4 x i32> addrspace(1)* %src, align 4 + %neg = sub <4 x i32> %z3, %val + %cond = icmp sgt <4 x i32> %val, %neg + %res = select <4 x i1> %cond, <4 x i32> %val, <4 x i32> %neg + %res2 = add <4 x i32> %res, %t3 + store <4 x i32> %res2, <4 x i32> addrspace(1)* %out, align 4 + ret void +}