diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/R600/SIInstrInfo.td
index d68fbff667f..cf45b30c0b5 100644
--- a/lib/Target/R600/SIInstrInfo.td
+++ b/lib/Target/R600/SIInstrInfo.td
@@ -7,13 +7,58 @@
 //
 //===----------------------------------------------------------------------===//
 
+// Except for the NONE field, this must be kept in sync with the SISubtarget enum
+// in AMDGPUMCInstLower.h
+def SISubtarget {
+  int NONE = -1;
+  int SI = 0;
+}
+
 //===----------------------------------------------------------------------===//
 // SI DAG Nodes
 //===----------------------------------------------------------------------===//
 
-// SMRD takes a 64bit memory address and can only add an 32bit offset
-def SIadd64bit32bit : SDNode<"ISD::ADD",
-  SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisVT<0, i64>, SDTCisVT<2, i32>]>
+def SIload_constant : SDNode<"AMDGPUISD::LOAD_CONSTANT",
+  SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisVT<1, v4i32>, SDTCisVT<2, i32>]>,
+  [SDNPMayLoad, SDNPMemOperand]
+>;
+
+def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT",
+  SDTypeProfile<0, 13,
+    [SDTCisVT<0, v4i32>,   // rsrc(SGPR)
+     SDTCisVT<1, iAny>,    // vdata(VGPR)
+     SDTCisVT<2, i32>,     // num_channels(imm)
+     SDTCisVT<3, i32>,     // vaddr(VGPR)
+     SDTCisVT<4, i32>,     // soffset(SGPR)
+     SDTCisVT<5, i32>,     // inst_offset(imm)
+     SDTCisVT<6, i32>,     // dfmt(imm)
+     SDTCisVT<7, i32>,     // nfmt(imm)
+     SDTCisVT<8, i32>,     // offen(imm)
+     SDTCisVT<9, i32>,     // idxen(imm)
+     SDTCisVT<10, i32>,    // glc(imm)
+     SDTCisVT<11, i32>,    // slc(imm)
+     SDTCisVT<12, i32>     // tfe(imm)
+    ]>,
+  [SDNPMayStore, SDNPMemOperand, SDNPHasChain]
+>;
+
+def SIload_input : SDNode<"AMDGPUISD::LOAD_INPUT",
+  SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisVT<1, v4i32>, SDTCisVT<2, i16>,
+                       SDTCisVT<3, i32>]>
+>;
+
+class SDSample<string opcode> : SDNode <opcode,
+  SDTypeProfile<1, 4, [SDTCisVT<0, v4f32>, SDTCisVT<2, v32i8>,
+                       SDTCisVT<3, v4i32>, SDTCisVT<4, i32>]>
+>;
+
+def SIsample : SDSample<"AMDGPUISD::SAMPLE">;
+def SIsampleb : SDSample<"AMDGPUISD::SAMPLEB">;
+def SIsampled : SDSample<"AMDGPUISD::SAMPLED">;
+def SIsamplel : SDSample<"AMDGPUISD::SAMPLEL">;
+
+def SIconstdata_ptr : SDNode<
+  "AMDGPUISD::CONST_DATA_PTR", SDTypeProfile <1, 0, [SDTCisVT<0, i64>]>
 >;
 
 // Transformation function, extract the lower 32bit of a 64bit immediate
@@ -21,29 +66,143 @@ def LO32 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(N->getZExtValue() & 0xffffffff, MVT::i32);
 }]>;
 
+def LO32f : SDNodeXForm<fpimm, [{
+  APInt V = N->getValueAPF().bitcastToAPInt().trunc(32);
+  return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
+}]>;
+
 // Transformation function, extract the upper 32bit of a 64bit immediate
 def HI32 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(N->getZExtValue() >> 32, MVT::i32);
 }]>;
 
-def IMM8bitDWORD : ImmLeaf <
-  i32, [{
-    return (Imm & ~0x3FC) == 0;
-  }], SDNodeXForm<imm, [{
-    return CurDAG->getTargetConstant(
-      N->getZExtValue() >> 2, MVT::i32);
-  }]>
+def HI32f : SDNodeXForm<fpimm, [{
+  APInt V = N->getValueAPF().bitcastToAPInt().lshr(32).trunc(32);
+  return CurDAG->getTargetConstantFP(APFloat(APFloat::IEEEsingle, V), MVT::f32);
+}]>;
+
+def IMM8bitDWORD : PatLeaf <(imm),
+  [{return (N->getZExtValue() & ~0x3FC) == 0;}]
+>;
+
+def as_dword_i32imm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getZExtValue() >> 2, MVT::i32);
+}]>;
+
+def as_i1imm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getZExtValue(), MVT::i1);
+}]>;
+
+def as_i8imm : 
SDNodeXFormgetTargetConstant(N->getZExtValue(), MVT::i8); +}]>; + +def as_i16imm : SDNodeXFormgetTargetConstant(N->getSExtValue(), MVT::i16); +}]>; + +def as_i32imm: SDNodeXFormgetTargetConstant(N->getSExtValue(), MVT::i32); +}]>; + +def IMM8bit : PatLeaf <(imm), + [{return isUInt<8>(N->getZExtValue());}] >; -def IMM12bit : ImmLeaf < - i16, - [{return isUInt<12>(Imm);}] +def IMM12bit : PatLeaf <(imm), + [{return isUInt<12>(N->getZExtValue());}] >; -class InlineImm : ImmLeaf (N->getZExtValue());}] +>; + +def IMM32bit : PatLeaf <(imm), + [{return isUInt<32>(N->getZExtValue());}] +>; + +def mubuf_vaddr_offset : PatFrag< + (ops node:$ptr, node:$offset, node:$imm_offset), + (add (add node:$ptr, node:$offset), node:$imm_offset) +>; + +class InlineImm : PatLeaf <(vt imm), [{ + return isInlineImmediate(N); +}]>; + +class SGPRImm : PatLeaf().getGeneration() < + AMDGPUSubtarget::SOUTHERN_ISLANDS) { + return false; + } + const SIRegisterInfo *SIRI = + static_cast(TM.getSubtargetImpl()->getRegisterInfo()); + for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end(); + U != E; ++U) { + if (SIRI->isSGPRClass(getOperandRegClass(*U, U.getOperandNo()))) { + return true; + } + } + return false; }]>; +//===----------------------------------------------------------------------===// +// Custom Operands +//===----------------------------------------------------------------------===// + +def FRAMEri32 : Operand { + let MIOperandInfo = (ops i32:$ptr, i32imm:$index); +} + +def sopp_brtarget : Operand { + let EncoderMethod = "getSOPPBrEncoding"; + let OperandType = "OPERAND_PCREL"; +} + +include "SIInstrFormats.td" + +let OperandType = "OPERAND_IMMEDIATE" in { + +def offen : Operand { + let PrintMethod = "printOffen"; +} +def idxen : Operand { + let PrintMethod = "printIdxen"; +} +def addr64 : Operand { + let PrintMethod = "printAddr64"; +} +def mbuf_offset : Operand { + let PrintMethod = "printMBUFOffset"; +} +def glc : Operand { + let PrintMethod = "printGLC"; +} +def slc : Operand { + let PrintMethod = "printSLC"; +} +def tfe : Operand { + let PrintMethod = "printTFE"; +} + +} // End OperandType = "OPERAND_IMMEDIATE" + +//===----------------------------------------------------------------------===// +// Complex patterns +//===----------------------------------------------------------------------===// + +def DS1Addr1Offset : ComplexPattern; +def DS64Bit4ByteAligned : ComplexPattern; + +def MUBUFAddr32 : ComplexPattern; +def MUBUFAddr64 : ComplexPattern; +def MUBUFAddr64Atomic : ComplexPattern; +def MUBUFScratch : ComplexPattern; +def MUBUFOffset : ComplexPattern; +def MUBUFOffsetAtomic : ComplexPattern; + +def VOP3Mods0 : ComplexPattern; +def VOP3Mods : ComplexPattern; //===----------------------------------------------------------------------===// // SI assembler operands @@ -52,19 +211,20 @@ class InlineImm : ImmLeaf : Operand { - let EncoderMethod = "GPR4AlignEncode"; - let MIOperandInfo = (ops rc:$reg); +def SRCMODS { + int NONE = 0; } -class GPR2Align : Operand { - let EncoderMethod = "GPR2AlignEncode"; - let MIOperandInfo = (ops rc:$reg); +def DSTCLAMP { + int NONE = 0; } -include "SIInstrFormats.td" +def DSTOMOD { + int NONE = 0; +} //===----------------------------------------------------------------------===// // @@ -96,6 +256,12 @@ class SOP1_64 op, string opName, list pattern> : SOP1 < opName#" $dst, $src0", pattern >; +// 64-bit input, 32-bit output. 
+class SOP1_32_64 op, string opName, list pattern> : SOP1 < + op, (outs SReg_32:$dst), (ins SSrc_64:$src0), + opName#" $dst, $src0", pattern +>; + class SOP2_32 op, string opName, list pattern> : SOP2 < op, (outs SReg_32:$dst), (ins SSrc_32:$src0, SSrc_32:$src1), opName#" $dst, $src0, $src1", pattern @@ -106,15 +272,22 @@ class SOP2_64 op, string opName, list pattern> : SOP2 < opName#" $dst, $src0, $src1", pattern >; -class SOPC_32 op, string opName, list pattern> : SOPC < - op, (outs SCCReg:$dst), (ins SSrc_32:$src0, SSrc_32:$src1), +class SOP2_SHIFT_64 op, string opName, list pattern> : SOP2 < + op, (outs SReg_64:$dst), (ins SSrc_64:$src0, SSrc_32:$src1), opName#" $dst, $src0, $src1", pattern >; -class SOPC_64 op, string opName, list pattern> : SOPC < - op, (outs SCCReg:$dst), (ins SSrc_64:$src0, SSrc_64:$src1), - opName#" $dst, $src0, $src1", pattern ->; + +class SOPC_Helper op, RegisterClass rc, ValueType vt, + string opName, PatLeaf cond> : SOPC < + op, (outs SCCReg:$dst), (ins rc:$src0, rc:$src1), + opName#" $dst, $src0, $src1", []>; + +class SOPC_32 op, string opName, PatLeaf cond = COND_NULL> + : SOPC_Helper; + +class SOPC_64 op, string opName, PatLeaf cond = COND_NULL> + : SOPC_Helper; class SOPK_32 op, string opName, list pattern> : SOPK < op, (outs SReg_32:$dst), (ins i16imm:$src0), @@ -126,16 +299,17 @@ class SOPK_64 op, string opName, list pattern> : SOPK < opName#" $dst, $src0", pattern >; -multiclass SMRD_Helper op, string asm, RegisterClass dstClass> { +multiclass SMRD_Helper op, string asm, RegisterClass baseClass, + RegisterClass dstClass> { def _IMM : SMRD < op, 1, (outs dstClass:$dst), - (ins GPR2Align:$sbase, i32imm:$offset), + (ins baseClass:$sbase, u32imm:$offset), asm#" $dst, $sbase, $offset", [] >; def _SGPR : SMRD < op, 0, (outs dstClass:$dst), - (ins GPR2Align:$sbase, SReg_32:$soff), + (ins baseClass:$sbase, SReg_32:$soff), asm#" $dst, $sbase, $soff", [] >; } @@ -144,136 +318,631 @@ multiclass SMRD_Helper op, string asm, RegisterClass dstClass> { // Vector ALU classes //===----------------------------------------------------------------------===// -multiclass VOP1_Helper op, RegisterClass drc, RegisterClass src, - string opName, list pattern> { +// This must always be right before the operand being input modified. +def InputMods : OperandWithDefaultOps { + let PrintMethod = "printOperandAndMods"; +} +def InputModsNoDefault : Operand { + let PrintMethod = "printOperandAndMods"; +} - def _e32: VOP1 < - op, (outs drc:$dst), (ins src:$src0), - opName#"_e32 $dst, $src0", pattern - >; +class getNumSrcArgs { + int ret = + !if (!eq(Src1.Value, untyped.Value), 1, // VOP1 + !if (!eq(Src2.Value, untyped.Value), 2, // VOP2 + 3)); // VOP3 +} - def _e64 : VOP3 < - {1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, - (outs drc:$dst), - (ins src:$src0, - i32imm:$abs, i32imm:$clamp, - i32imm:$omod, i32imm:$neg), - opName#"_e64 $dst, $src0, $abs, $clamp, $omod, $neg", [] - > { - let SRC1 = SIOperand.ZERO; - let SRC2 = SIOperand.ZERO; - } +// Returns the register class to use for the destination of VOP[123C] +// instructions for the given VT. +class getVALUDstForVT { + RegisterClass ret = !if(!eq(VT.Size, 32), VReg_32, VReg_64); } -multiclass VOP1_32 op, string opName, list pattern> - : VOP1_Helper ; +// Returns the register class to use for source 0 of VOP[12C] +// instructions for the given VT. 
+class getVOPSrc0ForVT { + RegisterClass ret = !if(!eq(VT.Size, 32), VSrc_32, VSrc_64); +} -multiclass VOP1_64 op, string opName, list pattern> - : VOP1_Helper ; +// Returns the register class to use for source 1 of VOP[12C] for the +// given VT. +class getVOPSrc1ForVT { + RegisterClass ret = !if(!eq(VT.Size, 32), VReg_32, VReg_64); +} -multiclass VOP2_Helper op, RegisterClass vrc, RegisterClass arc, - string opName, list pattern> { - def _e32 : VOP2 < - op, (outs vrc:$dst), (ins arc:$src0, vrc:$src1), - opName#"_e32 $dst, $src0, $src1", pattern - >; +// Returns the register classes for the source arguments of a VOP[12C] +// instruction for the given SrcVTs. +class getInRC32 SrcVT> { + list ret = [ + getVOPSrc0ForVT.ret, + getVOPSrc1ForVT.ret + ]; +} - def _e64 : VOP3 < - {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, - (outs vrc:$dst), - (ins arc:$src0, vrc:$src1, - i32imm:$abs, i32imm:$clamp, - i32imm:$omod, i32imm:$neg), - opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", [] - > { - let SRC2 = SIOperand.ZERO; +// Returns the register class to use for sources of VOP3 instructions for the +// given VT. +class getVOP3SrcForVT { + RegisterClass ret = !if(!eq(VT.Size, 32), VCSrc_32, VCSrc_64); +} + +// Returns the register classes for the source arguments of a VOP3 +// instruction for the given SrcVTs. +class getInRC64 SrcVT> { + list ret = [ + getVOP3SrcForVT.ret, + getVOP3SrcForVT.ret, + getVOP3SrcForVT.ret + ]; +} + +// Returns 1 if the source arguments have modifiers, 0 if they do not. +class hasModifiers { + bit ret = !if(!eq(SrcVT.Value, f32.Value), 1, + !if(!eq(SrcVT.Value, f64.Value), 1, 0)); +} + +// Returns the input arguments for VOP[12C] instructions for the given SrcVT. +class getIns32 { + dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0), // VOP1 + !if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2 + (ins))); +} + +// Returns the input arguments for VOP3 instructions for the given SrcVT. +class getIns64 { + + dag ret = + !if (!eq(NumSrcArgs, 1), + !if (!eq(HasModifiers, 1), + // VOP1 with modifiers + (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0, + i32imm:$clamp, i32imm:$omod) + /* else */, + // VOP1 without modifiers + (ins Src0RC:$src0) + /* endif */ ), + !if (!eq(NumSrcArgs, 2), + !if (!eq(HasModifiers, 1), + // VOP 2 with modifiers + (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0, + InputModsNoDefault:$src1_modifiers, Src1RC:$src1, + i32imm:$clamp, i32imm:$omod) + /* else */, + // VOP2 without modifiers + (ins Src0RC:$src0, Src1RC:$src1) + /* endif */ ) + /* NumSrcArgs == 3 */, + !if (!eq(HasModifiers, 1), + // VOP3 with modifiers + (ins InputModsNoDefault:$src0_modifiers, Src0RC:$src0, + InputModsNoDefault:$src1_modifiers, Src1RC:$src1, + InputModsNoDefault:$src2_modifiers, Src2RC:$src2, + i32imm:$clamp, i32imm:$omod) + /* else */, + // VOP3 without modifiers + (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2) + /* endif */ ))); +} + +// Returns the assembly string for the inputs and outputs of a VOP[12C] +// instruction. This does not add the _e32 suffix, so it can be reused +// by getAsm64. +class getAsm32 { + string src1 = ", $src1"; + string src2 = ", $src2"; + string ret = " $dst, $src0"# + !if(!eq(NumSrcArgs, 1), "", src1)# + !if(!eq(NumSrcArgs, 3), src2, ""); +} + +// Returns the assembly string for the inputs and outputs of a VOP3 +// instruction. 
+class getAsm64 { + string src0 = "$src0_modifiers,"; + string src1 = !if(!eq(NumSrcArgs, 1), "", " $src1_modifiers,"); + string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers,", ""); + string ret = + !if(!eq(HasModifiers, 0), + getAsm32.ret, + " $dst, "#src0#src1#src2#" $clamp, $omod"); +} + + +class VOPProfile _ArgVT> { + + field list ArgVT = _ArgVT; + + field ValueType DstVT = ArgVT[0]; + field ValueType Src0VT = ArgVT[1]; + field ValueType Src1VT = ArgVT[2]; + field ValueType Src2VT = ArgVT[3]; + field RegisterClass DstRC = getVALUDstForVT.ret; + field RegisterClass Src0RC32 = getVOPSrc0ForVT.ret; + field RegisterClass Src1RC32 = getVOPSrc1ForVT.ret; + field RegisterClass Src0RC64 = getVOP3SrcForVT.ret; + field RegisterClass Src1RC64 = getVOP3SrcForVT.ret; + field RegisterClass Src2RC64 = getVOP3SrcForVT.ret; + + field int NumSrcArgs = getNumSrcArgs.ret; + field bit HasModifiers = hasModifiers.ret; + + field dag Outs = (outs DstRC:$dst); + + field dag Ins32 = getIns32.ret; + field dag Ins64 = getIns64.ret; + + field string Asm32 = "_e32"#getAsm32.ret; + field string Asm64 = getAsm64.ret; +} + +def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>; +def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>; +def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>; +def VOP_F64_F32 : VOPProfile <[f64, f32, untyped, untyped]>; +def VOP_F64_F64 : VOPProfile <[f64, f64, untyped, untyped]>; +def VOP_F64_I32 : VOPProfile <[f64, i32, untyped, untyped]>; +def VOP_I32_F32 : VOPProfile <[i32, f32, untyped, untyped]>; +def VOP_I32_F64 : VOPProfile <[i32, f64, untyped, untyped]>; +def VOP_I32_I32 : VOPProfile <[i32, i32, untyped, untyped]>; + +def VOP_F32_F32_F32 : VOPProfile <[f32, f32, f32, untyped]>; +def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>; +def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>; +def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>; +def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>; +def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>; +def VOP_I32_I32_I32_VCC : VOPProfile <[i32, i32, i32, untyped]> { + let Src0RC32 = VCSrc_32; +} +def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>; +def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>; + +def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>; +def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>; +def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>; +def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>; + + +class VOP { + string OpName = opName; +} + +class VOP2_REV { + string RevOp = revOp; + bit IsOrig = isOrig; +} + +class AtomicNoRet { + string NoRetOp = noRetOp; + bit IsRet = isRet; +} + +class SIMCInstr { + string PseudoInstr = pseudo; + int Subtarget = subtarget; +} + +class VOP3DisableFields { + + bits<2> src0_modifiers = !if(HasModifiers, ?, 0); + bits<2> src1_modifiers = !if(HasModifiers, !if(HasSrc1, ?, 0), 0); + bits<2> src2_modifiers = !if(HasModifiers, !if(HasSrc2, ? 
,0) ,0); + bits<2> omod = !if(HasModifiers, ?, 0); + bits<1> clamp = !if(HasModifiers, ?, 0); + bits<9> src1 = !if(HasSrc1, ?, 0); + bits<9> src2 = !if(HasSrc2, ?, 0); +} + +class VOP3_Pseudo pattern, string opName> : + VOP3Common , + VOP , + SIMCInstr { + let isPseudo = 1; +} + +class VOP3_Real_si op, dag outs, dag ins, string asm, string opName> : + VOP3 , + SIMCInstr; + +multiclass VOP3_m op, dag outs, dag ins, string asm, list pattern, + string opName, int NumSrcArgs, bit HasMods = 1> { + + def "" : VOP3_Pseudo ; + + def _si : VOP3_Real_si , + VOP3DisableFields; + +} + +multiclass VOP3_1_m op, dag outs, dag ins, string asm, + list pattern, string opName, bit HasMods = 1> { + + def "" : VOP3_Pseudo ; + + def _si : VOP3_Real_si < + {1, 1, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, + outs, ins, asm, opName>, + VOP3DisableFields<0, 0, HasMods>; +} + +multiclass VOP3_2_m op, dag outs, dag ins, string asm, + list pattern, string opName, string revOp, + bit HasMods = 1, bit UseFullOp = 0> { + + def "" : VOP3_Pseudo , + VOP2_REV; + + def _si : VOP3_Real_si , + VOP2_REV, + VOP3DisableFields<1, 0, HasMods>; +} + +multiclass VOP3b_2_m op, dag outs, dag ins, string asm, + list pattern, string opName, string revOp, + bit HasMods = 1, bit UseFullOp = 0> { + def "" : VOP3_Pseudo , + VOP2_REV; + + // The VOP2 variant puts the carry out into VCC, the VOP3 variant + // can write it into any SGPR. We currently don't use the carry out, + // so for now hardcode it to VCC as well. + let sdst = SIOperand.VCC, Defs = [VCC] in { + def _si : VOP3b , + VOP3DisableFields<1, 0, HasMods>, + SIMCInstr, + VOP2_REV; + } // End sdst = SIOperand.VCC, Defs = [VCC] +} + +multiclass VOP3_C_m op, dag outs, dag ins, string asm, + list pattern, string opName, + bit HasMods, bit defExec> { + + def "" : VOP3_Pseudo ; + + def _si : VOP3_Real_si < + {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, + outs, ins, asm, opName>, + VOP3DisableFields<1, 0, HasMods> { + let Defs = !if(defExec, [EXEC], []); } } -multiclass VOP2_32 op, string opName, list pattern> - : VOP2_Helper ; +multiclass VOP1_Helper op, string opName, dag outs, + dag ins32, string asm32, list pat32, + dag ins64, string asm64, list pat64, + bit HasMods> { -multiclass VOP2_64 op, string opName, list pattern> - : VOP2_Helper ; + def _e32 : VOP1 , VOP; -multiclass VOP2b_32 op, string opName, list pattern> { + defm _e64 : VOP3_1_m ; +} - def _e32 : VOP2 < - op, (outs VReg_32:$dst), (ins VSrc_32:$src0, VReg_32:$src1), - opName#"_e32 $dst, $src0, $src1", pattern - >; +multiclass VOP1Inst op, string opName, VOPProfile P, + SDPatternOperator node = null_frag> : VOP1_Helper < + op, opName, P.Outs, + P.Ins32, P.Asm32, [], + P.Ins64, P.Asm64, + !if(P.HasModifiers, + [(set P.DstVT:$dst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, + i32:$src0_modifiers, i32:$clamp, i32:$omod))))], + [(set P.DstVT:$dst, (node P.Src0VT:$src0))]), + P.HasModifiers +>; + +class VOP2_e32 op, string opName, dag outs, dag ins, string asm, + list pattern, string revOp> : + VOP2 , + VOP , + VOP2_REV; + +multiclass VOP2_Helper op, string opName, dag outs, + dag ins32, string asm32, list pat32, + dag ins64, string asm64, list pat64, + string revOp, bit HasMods> { + def _e32 : VOP2_e32 ; - def _e64 : VOP3b < + defm _e64 : VOP3_2_m < {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, - (outs VReg_32:$dst), - (ins VSrc_32:$src0, VReg_32:$src1, - i32imm:$abs, i32imm:$clamp, - i32imm:$omod, i32imm:$neg), - opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", [] - > { - let SRC2 = 
SIOperand.ZERO; - /* the VOP2 variant puts the carry out into VCC, the VOP3 variant - can write it into any SGPR. We currently don't use the carry out, - so for now hardcode it to VCC as well */ - let SDST = SIOperand.VCC; - } + outs, ins64, opName#"_e64"#asm64, pat64, opName, revOp, HasMods + >; } -multiclass VOPC_Helper op, RegisterClass vrc, RegisterClass arc, - string opName, ValueType vt, PatLeaf cond> { +multiclass VOP2Inst op, string opName, VOPProfile P, + SDPatternOperator node = null_frag, + string revOp = opName> : VOP2_Helper < + op, opName, P.Outs, + P.Ins32, P.Asm32, [], + P.Ins64, P.Asm64, + !if(P.HasModifiers, + [(set P.DstVT:$dst, + (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, + i32:$clamp, i32:$omod)), + (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))], + [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]), + revOp, P.HasModifiers +>; + +multiclass VOP2b_Helper op, string opName, dag outs, + dag ins32, string asm32, list pat32, + dag ins64, string asm64, list pat64, + string revOp, bit HasMods> { + + def _e32 : VOP2_e32 ; - def _e32 : VOPC < - op, (ins arc:$src0, vrc:$src1), - opName#"_e32 $dst, $src0, $src1", [] + defm _e64 : VOP3b_2_m < + {1, 0, 0, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, + outs, ins64, opName#"_e64"#asm64, pat64, opName, revOp, HasMods >; +} - def _e64 : VOP3 < - {0, op{7}, op{6}, op{5}, op{4}, op{3}, op{2}, op{1}, op{0}}, - (outs SReg_64:$dst), - (ins arc:$src0, vrc:$src1, - InstFlag:$abs, InstFlag:$clamp, - InstFlag:$omod, InstFlag:$neg), - opName#"_e64 $dst, $src0, $src1, $abs, $clamp, $omod, $neg", - !if(!eq(!cast(cond), "COND_NULL"), [], - [(set SReg_64:$dst, (i1 (setcc (vt arc:$src0), vrc:$src1, cond)))] - ) - > { - let SRC2 = SIOperand.ZERO; +multiclass VOP2bInst op, string opName, VOPProfile P, + SDPatternOperator node = null_frag, + string revOp = opName> : VOP2b_Helper < + op, opName, P.Outs, + P.Ins32, P.Asm32, [], + P.Ins64, P.Asm64, + !if(P.HasModifiers, + [(set P.DstVT:$dst, + (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, + i32:$clamp, i32:$omod)), + (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))], + [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]), + revOp, P.HasModifiers +>; + +multiclass VOPC_Helper op, string opName, + dag ins32, string asm32, list pat32, + dag out64, dag ins64, string asm64, list pat64, + bit HasMods, bit DefExec> { + def _e32 : VOPC , VOP { + let Defs = !if(DefExec, [EXEC], []); } + + defm _e64 : VOP3_C_m ; } -multiclass VOPC_32 op, string opName, - ValueType vt = untyped, PatLeaf cond = COND_NULL> - : VOPC_Helper ; +multiclass VOPCInst op, string opName, + VOPProfile P, PatLeaf cond = COND_NULL, + bit DefExec = 0> : VOPC_Helper < + op, opName, + P.Ins32, P.Asm32, [], + (outs SReg_64:$dst), P.Ins64, P.Asm64, + !if(P.HasModifiers, + [(set i1:$dst, + (setcc (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, + i32:$clamp, i32:$omod)), + (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)), + cond))], + [(set i1:$dst, (setcc P.Src0VT:$src0, P.Src1VT:$src1, cond))]), + P.HasModifiers, DefExec +>; + +multiclass VOPC_F32 op, string opName, PatLeaf cond = COND_NULL> : + VOPCInst ; + +multiclass VOPC_F64 op, string opName, PatLeaf cond = COND_NULL> : + VOPCInst ; + +multiclass VOPC_I32 op, string opName, PatLeaf cond = COND_NULL> : + VOPCInst ; + +multiclass VOPC_I64 op, string opName, PatLeaf cond = COND_NULL> : + VOPCInst ; + + +multiclass VOPCX op, string opName, VOPProfile P, + PatLeaf cond = COND_NULL> + : VOPCInst ; + 
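The VOP2Inst and VOPCInst multiclasses above are the entry points SIInstructions.td uses to get both the _e32 encoding and the _e64 (VOP3) form of an instruction from a single line; the VOPCX variants additionally set DefExec so the CMPX compares also write EXEC. A minimal sketch of how such instantiations look, assuming illustrative opcode values and instruction names that are not taken from this patch:

// Illustrative instantiations only; opcodes and names are assumed.
defm V_ADD_F32    : VOP2Inst <0x00000003, "V_ADD_F32", VOP_F32_F32_F32, fadd>;
defm V_CMP_LT_F32 : VOPC_F32 <0x00000001, "V_CMP_LT_F32", COND_OLT>;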
+multiclass VOPCX_F32 op, string opName, PatLeaf cond = COND_NULL> : + VOPCX ; + +multiclass VOPCX_F64 op, string opName, PatLeaf cond = COND_NULL> : + VOPCX ; + +multiclass VOPCX_I32 op, string opName, PatLeaf cond = COND_NULL> : + VOPCX ; -multiclass VOPC_64 op, string opName, - ValueType vt = untyped, PatLeaf cond = COND_NULL> - : VOPC_Helper ; +multiclass VOPCX_I64 op, string opName, PatLeaf cond = COND_NULL> : + VOPCX ; -class VOP3_32 op, string opName, list pattern> : VOP3 < - op, (outs VReg_32:$dst), - (ins VSrc_32:$src0, VReg_32:$src1, VReg_32:$src2, - i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg), - opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern +multiclass VOP3_Helper op, string opName, dag outs, dag ins, string asm, + list pat, int NumSrcArgs, bit HasMods> : VOP3_m < + op, outs, ins, opName#asm, pat, opName, NumSrcArgs, HasMods >; -class VOP3_64 op, string opName, list pattern> : VOP3 < - op, (outs VReg_64:$dst), - (ins VSrc_64:$src0, VReg_64:$src1, VReg_64:$src2, - i32imm:$abs, i32imm:$clamp, i32imm:$omod, i32imm:$neg), - opName#" $dst, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern +multiclass VOP3Inst op, string opName, VOPProfile P, + SDPatternOperator node = null_frag> : VOP3_Helper < + op, opName, P.Outs, P.Ins64, P.Asm64, + !if(!eq(P.NumSrcArgs, 3), + !if(P.HasModifiers, + [(set P.DstVT:$dst, + (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, + i32:$clamp, i32:$omod)), + (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)), + (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))))], + [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1, + P.Src2VT:$src2))]), + !if(!eq(P.NumSrcArgs, 2), + !if(P.HasModifiers, + [(set P.DstVT:$dst, + (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, + i32:$clamp, i32:$omod)), + (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))], + [(set P.DstVT:$dst, (node P.Src0VT:$src0, P.Src1VT:$src1))]) + /* P.NumSrcArgs == 1 */, + !if(P.HasModifiers, + [(set P.DstVT:$dst, + (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, + i32:$clamp, i32:$omod))))], + [(set P.DstVT:$dst, (node P.Src0VT:$src0))]))), + P.NumSrcArgs, P.HasModifiers >; +multiclass VOP3b_Helper op, RegisterClass vrc, RegisterClass arc, + string opName, list pattern> : + VOP3b_2_m < + op, (outs vrc:$dst0, SReg_64:$dst1), + (ins arc:$src0, arc:$src1, arc:$src2, + InstFlag:$abs, InstFlag:$clamp, InstFlag:$omod, InstFlag:$neg), + opName#" $dst0, $dst1, $src0, $src1, $src2, $abs, $clamp, $omod, $neg", pattern, + opName, opName, 1, 1 +>; + +multiclass VOP3b_64 op, string opName, list pattern> : + VOP3b_Helper ; + +multiclass VOP3b_32 op, string opName, list pattern> : + VOP3b_Helper ; + + +class Vop3ModPat : Pat< + (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i32:$clamp, i32:$omod)), + (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers)), + (P.Src2VT (VOP3Mods P.Src2VT:$src2, i32:$src2_modifiers))), + (Inst i32:$src0_modifiers, P.Src0VT:$src0, + i32:$src1_modifiers, P.Src1VT:$src1, + i32:$src2_modifiers, P.Src2VT:$src2, + i32:$clamp, + i32:$omod)>; + //===----------------------------------------------------------------------===// // Vector I/O classes //===----------------------------------------------------------------------===// +class DS_1A op, dag outs, dag ins, string asm, list pat> : + DS { + bits<16> offset; + + // Single load interpret the 2 i8imm operands as a single i16 offset. 
+ let offset0 = offset{7-0}; + let offset1 = offset{15-8}; +} + +class DS_Load_Helper op, string asm, RegisterClass regClass> : DS_1A < + op, + (outs regClass:$vdst), + (ins i1imm:$gds, VReg_32:$addr, u16imm:$offset), + asm#" $vdst, $addr, $offset, [M0]", + []> { + let data0 = 0; + let data1 = 0; + let mayLoad = 1; + let mayStore = 0; +} + +class DS_Load2_Helper op, string asm, RegisterClass regClass> : DS < + op, + (outs regClass:$vdst), + (ins i1imm:$gds, VReg_32:$addr, u8imm:$offset0, u8imm:$offset1), + asm#" $vdst, $addr, $offset0, $offset1, [M0]", + []> { + let data0 = 0; + let data1 = 0; + let mayLoad = 1; + let mayStore = 0; +} + +class DS_Store_Helper op, string asm, RegisterClass regClass> : DS_1A < + op, + (outs), + (ins i1imm:$gds, VReg_32:$addr, regClass:$data0, u16imm:$offset), + asm#" $addr, $data0, $offset [M0]", + []> { + let data1 = 0; + let mayStore = 1; + let mayLoad = 0; + let vdst = 0; +} + +class DS_Store2_Helper op, string asm, RegisterClass regClass> : DS < + op, + (outs), + (ins i1imm:$gds, VReg_32:$addr, regClass:$data0, regClass:$data1, + u8imm:$offset0, u8imm:$offset1), + asm#" $addr, $data0, $data1, $offset0, $offset1 [M0]", + []> { + let mayStore = 1; + let mayLoad = 0; + let vdst = 0; +} + +// 1 address, 1 data. +class DS_1A1D_RET op, string asm, RegisterClass rc, string noRetOp = ""> : DS_1A < + op, + (outs rc:$vdst), + (ins i1imm:$gds, VReg_32:$addr, rc:$data0, u16imm:$offset), + asm#" $vdst, $addr, $data0, $offset, [M0]", []>, + AtomicNoRet { + + let data1 = 0; + let mayStore = 1; + let mayLoad = 1; + + let hasPostISelHook = 1; // Adjusted to no return version. +} + +// 1 address, 2 data. +class DS_1A2D_RET op, string asm, RegisterClass rc, string noRetOp = ""> : DS_1A < + op, + (outs rc:$vdst), + (ins i1imm:$gds, VReg_32:$addr, rc:$data0, rc:$data1, u16imm:$offset), + asm#" $vdst, $addr, $data0, $data1, $offset, [M0]", + []>, + AtomicNoRet { + let mayStore = 1; + let mayLoad = 1; + + let hasPostISelHook = 1; // Adjusted to no return version. +} + +// 1 address, 2 data. +class DS_1A2D_NORET op, string asm, RegisterClass rc, string noRetOp = asm> : DS_1A < + op, + (outs), + (ins i1imm:$gds, VReg_32:$addr, rc:$data0, rc:$data1, u16imm:$offset), + asm#" $addr, $data0, $data1, $offset, [M0]", + []>, + AtomicNoRet { + let mayStore = 1; + let mayLoad = 1; +} + +// 1 address, 1 data. 
+class DS_1A1D_NORET op, string asm, RegisterClass rc, string noRetOp = asm> : DS_1A < + op, + (outs), + (ins i1imm:$gds, VReg_32:$addr, rc:$data0, u16imm:$offset), + asm#" $addr, $data0, $offset, [M0]", + []>, + AtomicNoRet { + + let data1 = 0; + let mayStore = 1; + let mayLoad = 1; +} + +class MUBUFAddr64Table { + + bit IsAddr64 = is_addr64; + string OpName = NAME # suffix; +} + class MTBUF_Store_Helper op, string asm, RegisterClass regClass> : MTBUF < op, (outs), - (ins regClass:$vdata, i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, + (ins regClass:$vdata, u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64, i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr, - GPR4Align:$srsrc, i1imm:$slc, i1imm:$tfe, SSrc_32:$soffset), + SReg_128:$srsrc, i1imm:$slc, i1imm:$tfe, SSrc_32:$soffset), asm#" $vdata, $offset, $offen, $idxen, $glc, $addr64, $dfmt," #" $nfmt, $vaddr, $srsrc, $slc, $tfe, $soffset", []> { @@ -281,24 +950,226 @@ class MTBUF_Store_Helper op, string asm, RegisterClass regClass> : MTBU let mayLoad = 0; } -class MUBUF_Load_Helper op, string asm, RegisterClass regClass> : MUBUF < - op, - (outs regClass:$dst), - (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64, - i1imm:$lds, VReg_32:$vaddr, GPR4Align:$srsrc, i1imm:$slc, - i1imm:$tfe, SSrc_32:$soffset), - asm#" $dst, $offset, $offen, $idxen, $glc, $addr64, " - #"$lds, $vaddr, $srsrc, $slc, $tfe, $soffset", - []> { +class MUBUFAtomicAddr64 op, dag outs, dag ins, string asm, list pattern> + : MUBUF { + + let offen = 0; + let idxen = 0; + let addr64 = 1; + let tfe = 0; + let lds = 0; + let soffset = 128; +} + +class MUBUFAtomicOffset op, dag outs, dag ins, string asm, list pattern> + : MUBUF { + + let offen = 0; + let idxen = 0; + let addr64 = 0; + let tfe = 0; + let lds = 0; + let vaddr = 0; +} + +multiclass MUBUF_Atomic op, string name, RegisterClass rc, + ValueType vt, SDPatternOperator atomic> { + + let mayStore = 1, mayLoad = 1, hasPostISelHook = 1 in { + + // No return variants + let glc = 0 in { + + def _ADDR64 : MUBUFAtomicAddr64 < + op, (outs), + (ins rc:$vdata, SReg_128:$srsrc, VReg_64:$vaddr, + mbuf_offset:$offset, slc:$slc), + name#" $vdata, $vaddr, $srsrc, 0 addr64"#"$offset"#"$slc", [] + >, MUBUFAddr64Table<1>, AtomicNoRet; + + def _OFFSET : MUBUFAtomicOffset < + op, (outs), + (ins rc:$vdata, SReg_128:$srsrc, mbuf_offset:$offset, + SSrc_32:$soffset, slc:$slc), + name#" $vdata, $srsrc, $soffset"#"$offset"#"$slc", [] + >, MUBUFAddr64Table<0>, AtomicNoRet; + } // glc = 0 + + // Variant that return values + let glc = 1, Constraints = "$vdata = $vdata_in", + DisableEncoding = "$vdata_in" in { + + def _RTN_ADDR64 : MUBUFAtomicAddr64 < + op, (outs rc:$vdata), + (ins rc:$vdata_in, SReg_128:$srsrc, VReg_64:$vaddr, + mbuf_offset:$offset, slc:$slc), + name#" $vdata, $vaddr, $srsrc, 0 addr64"#"$offset"#" glc"#"$slc", + [(set vt:$vdata, + (atomic (MUBUFAddr64Atomic v4i32:$srsrc, i64:$vaddr, i16:$offset, + i1:$slc), vt:$vdata_in))] + >, MUBUFAddr64Table<1, "_RTN">, AtomicNoRet; + + def _RTN_OFFSET : MUBUFAtomicOffset < + op, (outs rc:$vdata), + (ins rc:$vdata_in, SReg_128:$srsrc, mbuf_offset:$offset, + SSrc_32:$soffset, slc:$slc), + name#" $vdata, $srsrc, $soffset"#"$offset"#" glc $slc", + [(set vt:$vdata, + (atomic (MUBUFOffsetAtomic v4i32:$srsrc, i32:$soffset, i16:$offset, + i1:$slc), vt:$vdata_in))] + >, MUBUFAddr64Table<0, "_RTN">, AtomicNoRet; + + } // glc = 1 + + } // mayStore = 1, mayLoad = 1, hasPostISelHook = 1 +} + +multiclass MUBUF_Load_Helper op, string asm, RegisterClass regClass, + 
ValueType load_vt = i32, + SDPatternOperator ld = null_frag> { + + let lds = 0, mayLoad = 1 in { + + let addr64 = 0 in { + + let offen = 0, idxen = 0, vaddr = 0 in { + def _OFFSET : MUBUF , + MUBUFAddr64Table<0>; + } + + let offen = 1, idxen = 0 in { + def _OFFEN : MUBUF ; + } + + let offen = 0, idxen = 1 in { + def _IDXEN : MUBUF ; + } + + let offen = 1, idxen = 1 in { + def _BOTHEN : MUBUF ; + } + } + + let offen = 0, idxen = 0, addr64 = 1, glc = 0, slc = 0, tfe = 0, soffset = 128 /* ZERO */ in { + def _ADDR64 : MUBUF , MUBUFAddr64Table<1>; + } + } +} + +multiclass MUBUF_Store_Helper op, string name, RegisterClass vdataClass, + ValueType store_vt, SDPatternOperator st> { + + let addr64 = 0, lds = 0 in { + + def "" : MUBUF < + op, (outs), + (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_32:$vaddr, SSrc_32:$soffset, + mbuf_offset:$offset, offen:$offen, idxen:$idxen, glc:$glc, slc:$slc, + tfe:$tfe), + name#" $vdata, $vaddr, $srsrc, $soffset"#"$offen"#"$idxen"#"$offset"# + "$glc"#"$slc"#"$tfe", + [] + >; + + let offen = 0, idxen = 0, vaddr = 0 in { + def _OFFSET : MUBUF < + op, (outs), + (ins vdataClass:$vdata, SReg_128:$srsrc, mbuf_offset:$offset, + SSrc_32:$soffset, glc:$glc, slc:$slc, tfe:$tfe), + name#" $vdata, $srsrc, $soffset"#"$offset"#"$glc"#"$slc"#"$tfe", + [(st store_vt:$vdata, (MUBUFOffset v4i32:$srsrc, i32:$soffset, + i16:$offset, i1:$glc, i1:$slc, + i1:$tfe))] + >, MUBUFAddr64Table<0>; + } // offen = 0, idxen = 0, vaddr = 0 + + let offen = 1, idxen = 0 in { + def _OFFEN : MUBUF < + op, (outs), + (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_32:$vaddr, SSrc_32:$soffset, + mbuf_offset:$offset, glc:$glc, slc:$slc, tfe:$tfe), + name#" $vdata, $vaddr, $srsrc, $soffset offen"#"$offset"# + "$glc"#"$slc"#"$tfe", + [] + >; + } // end offen = 1, idxen = 0 + + } // End addr64 = 0, lds = 0 + + def _ADDR64 : MUBUF < + op, (outs), + (ins vdataClass:$vdata, SReg_128:$srsrc, VReg_64:$vaddr, mbuf_offset:$offset), + name#" $vdata, $vaddr, $srsrc, 0 addr64"#"$offset", + [(st store_vt:$vdata, + (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i16:$offset))]>, MUBUFAddr64Table<1> + { + + let mayLoad = 0; + let mayStore = 1; + + // Encoding + let offen = 0; + let idxen = 0; + let glc = 0; + let addr64 = 1; + let lds = 0; + let slc = 0; + let tfe = 0; + let soffset = 128; // ZERO + } +} + +class FLAT_Load_Helper op, string asm, RegisterClass regClass> : + FLAT { + let glc = 0; + let slc = 0; + let tfe = 0; let mayLoad = 1; - let mayStore = 0; +} + +class FLAT_Store_Helper op, string name, RegisterClass vdataClass> : + FLAT { + + let mayLoad = 0; + let mayStore = 1; + + // Encoding + let glc = 0; + let slc = 0; + let tfe = 0; } class MTBUF_Load_Helper op, string asm, RegisterClass regClass> : MTBUF < op, (outs regClass:$dst), - (ins i16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64, - i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr, GPR4Align:$srsrc, + (ins u16imm:$offset, i1imm:$offen, i1imm:$idxen, i1imm:$glc, i1imm:$addr64, + i8imm:$dfmt, i8imm:$nfmt, VReg_32:$vaddr, SReg_128:$srsrc, i1imm:$slc, i1imm:$tfe, SSrc_32:$soffset), asm#" $dst, $offset, $offen, $idxen, $glc, $addr64, $dfmt," #" $nfmt, $vaddr, $srsrc, $slc, $tfe, $soffset", @@ -307,17 +1178,219 @@ class MTBUF_Load_Helper op, string asm, RegisterClass regClass> : MTBUF let mayStore = 0; } -class MIMG_Load_Helper op, string asm> : MIMG < +class MIMG_Mask { + string Op = op; + int Channels = channels; +} + +class MIMG_NoSampler_Helper op, string asm, + RegisterClass dst_rc, + RegisterClass src_rc> : MIMG < + op, + (outs dst_rc:$vdata), + 
(ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128, + i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr, + SReg_256:$srsrc), + asm#" $vdata, $dmask, $unorm, $glc, $da, $r128," + #" $tfe, $lwe, $slc, $vaddr, $srsrc", + []> { + let SSAMP = 0; + let mayLoad = 1; + let mayStore = 0; + let hasPostISelHook = 1; +} + +multiclass MIMG_NoSampler_Src_Helper op, string asm, + RegisterClass dst_rc, + int channels> { + def _V1 : MIMG_NoSampler_Helper , + MIMG_Mask; + def _V2 : MIMG_NoSampler_Helper , + MIMG_Mask; + def _V4 : MIMG_NoSampler_Helper , + MIMG_Mask; +} + +multiclass MIMG_NoSampler op, string asm> { + defm _V1 : MIMG_NoSampler_Src_Helper ; + defm _V2 : MIMG_NoSampler_Src_Helper ; + defm _V3 : MIMG_NoSampler_Src_Helper ; + defm _V4 : MIMG_NoSampler_Src_Helper ; +} + +class MIMG_Sampler_Helper op, string asm, + RegisterClass dst_rc, + RegisterClass src_rc> : MIMG < op, - (outs VReg_128:$vdata), + (outs dst_rc:$vdata), (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128, - i1imm:$tfe, i1imm:$lwe, i1imm:$slc, VReg_32:$vaddr, - GPR4Align:$srsrc, GPR4Align:$ssamp), + i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr, + SReg_256:$srsrc, SReg_128:$ssamp), asm#" $vdata, $dmask, $unorm, $glc, $da, $r128," #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp", []> { let mayLoad = 1; let mayStore = 0; + let hasPostISelHook = 1; +} + +multiclass MIMG_Sampler_Src_Helper op, string asm, + RegisterClass dst_rc, + int channels> { + def _V1 : MIMG_Sampler_Helper , + MIMG_Mask; + def _V2 : MIMG_Sampler_Helper , + MIMG_Mask; + def _V4 : MIMG_Sampler_Helper , + MIMG_Mask; + def _V8 : MIMG_Sampler_Helper , + MIMG_Mask; + def _V16 : MIMG_Sampler_Helper , + MIMG_Mask; +} + +multiclass MIMG_Sampler op, string asm> { + defm _V1 : MIMG_Sampler_Src_Helper; + defm _V2 : MIMG_Sampler_Src_Helper; + defm _V3 : MIMG_Sampler_Src_Helper; + defm _V4 : MIMG_Sampler_Src_Helper; +} + +class MIMG_Gather_Helper op, string asm, + RegisterClass dst_rc, + RegisterClass src_rc> : MIMG < + op, + (outs dst_rc:$vdata), + (ins i32imm:$dmask, i1imm:$unorm, i1imm:$glc, i1imm:$da, i1imm:$r128, + i1imm:$tfe, i1imm:$lwe, i1imm:$slc, src_rc:$vaddr, + SReg_256:$srsrc, SReg_128:$ssamp), + asm#" $vdata, $dmask, $unorm, $glc, $da, $r128," + #" $tfe, $lwe, $slc, $vaddr, $srsrc, $ssamp", + []> { + let mayLoad = 1; + let mayStore = 0; + + // DMASK was repurposed for GATHER4. 4 components are always + // returned and DMASK works like a swizzle - it selects + // the component to fetch. The only useful DMASK values are + // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns + // (red,red,red,red) etc.) The ISA document doesn't mention + // this. 
+ // Therefore, disable all code which updates DMASK by setting these two: + let MIMG = 0; + let hasPostISelHook = 0; +} + +multiclass MIMG_Gather_Src_Helper op, string asm, + RegisterClass dst_rc, + int channels> { + def _V1 : MIMG_Gather_Helper , + MIMG_Mask; + def _V2 : MIMG_Gather_Helper , + MIMG_Mask; + def _V4 : MIMG_Gather_Helper , + MIMG_Mask; + def _V8 : MIMG_Gather_Helper , + MIMG_Mask; + def _V16 : MIMG_Gather_Helper , + MIMG_Mask; +} + +multiclass MIMG_Gather op, string asm> { + defm _V1 : MIMG_Gather_Src_Helper; + defm _V2 : MIMG_Gather_Src_Helper; + defm _V3 : MIMG_Gather_Src_Helper; + defm _V4 : MIMG_Gather_Src_Helper; +} + +//===----------------------------------------------------------------------===// +// Vector instruction mappings +//===----------------------------------------------------------------------===// + +// Maps an opcode in e32 form to its e64 equivalent +def getVOPe64 : InstrMapping { + let FilterClass = "VOP"; + let RowFields = ["OpName"]; + let ColFields = ["Size"]; + let KeyCol = ["4"]; + let ValueCols = [["8"]]; +} + +// Maps an opcode in e64 form to its e32 equivalent +def getVOPe32 : InstrMapping { + let FilterClass = "VOP"; + let RowFields = ["OpName"]; + let ColFields = ["Size"]; + let KeyCol = ["8"]; + let ValueCols = [["4"]]; +} + +// Maps an original opcode to its commuted version +def getCommuteRev : InstrMapping { + let FilterClass = "VOP2_REV"; + let RowFields = ["RevOp"]; + let ColFields = ["IsOrig"]; + let KeyCol = ["1"]; + let ValueCols = [["0"]]; +} + +def getMaskedMIMGOp : InstrMapping { + let FilterClass = "MIMG_Mask"; + let RowFields = ["Op"]; + let ColFields = ["Channels"]; + let KeyCol = ["4"]; + let ValueCols = [["1"], ["2"], ["3"] ]; +} + +// Maps an commuted opcode to its original version +def getCommuteOrig : InstrMapping { + let FilterClass = "VOP2_REV"; + let RowFields = ["RevOp"]; + let ColFields = ["IsOrig"]; + let KeyCol = ["0"]; + let ValueCols = [["1"]]; +} + +def isDS : InstrMapping { + let FilterClass = "DS"; + let RowFields = ["Inst"]; + let ColFields = ["Size"]; + let KeyCol = ["8"]; + let ValueCols = [["8"]]; +} + +def getMCOpcode : InstrMapping { + let FilterClass = "SIMCInstr"; + let RowFields = ["PseudoInstr"]; + let ColFields = ["Subtarget"]; + let KeyCol = [!cast(SISubtarget.NONE)]; + let ValueCols = [[!cast(SISubtarget.SI)]]; +} + +def getAddr64Inst : InstrMapping { + let FilterClass = "MUBUFAddr64Table"; + let RowFields = ["OpName"]; + let ColFields = ["IsAddr64"]; + let KeyCol = ["0"]; + let ValueCols = [["1"]]; +} + +// Maps an atomic opcode to its version with a return value. +def getAtomicRetOp : InstrMapping { + let FilterClass = "AtomicNoRet"; + let RowFields = ["NoRetOp"]; + let ColFields = ["IsRet"]; + let KeyCol = ["0"]; + let ValueCols = [["1"]]; +} + +// Maps an atomic opcode to its returnless version. +def getAtomicNoRetOp : InstrMapping { + let FilterClass = "AtomicNoRet"; + let RowFields = ["NoRetOp"]; + let ColFields = ["IsRet"]; + let KeyCol = ["1"]; + let ValueCols = [["0"]]; } include "SIInstructions.td"
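TableGen turns each InstrMapping above into a lookup table (for example getAtomicRetOp and getAtomicNoRetOp) that the C++ side of the backend queries by opcode, keyed on the string fields carried by the mix-in classes (VOP, VOP2_REV, AtomicNoRet, SIMCInstr, MUBUFAddr64Table). A minimal sketch of the kind of instruction pair the AtomicNoRet mapping connects, assuming illustrative opcodes and names that are not taken from this patch:

// Illustrative only: a no-return DS atomic and its returning form.
// Both carry NoRetOp = "DS_ADD_U32", so getAtomicRetOp maps the first
// to the second and getAtomicNoRetOp maps the second back to the first.
def DS_ADD_U32     : DS_1A1D_NORET <0x0, "DS_ADD_U32", VReg_32>;
def DS_ADD_RTN_U32 : DS_1A1D_RET <0x20, "DS_ADD_RTN_U32", VReg_32, "DS_ADD_U32">;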