X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTarget%2FHexagon%2FHexagonInstrInfo.td;h=037dbf7b6538587a35d14c4360648785ef45cd6f;hp=e91d811f489b9d401df77c859a84694304f0aeb7;hb=a19450f7b9cdeb2187758da9e181fa9c5dd79c81;hpb=c9092d28292e647cfc9c64f01f1f68faad085a90

diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td
index e91d811f489..037dbf7b653 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -13,7 +13,7 @@
 include "HexagonInstrFormats.td"
 include "HexagonOperands.td"
-
+include "HexagonInstrEnc.td"
 
 // Pattern fragment that combines the value type and the register class
 // into a single parameter.
 // The pat frags in the definitions below need to have a named register,
@@ -29,8 +29,36 @@ def F64 : PatLeaf<(f64 DoubleRegs:$R)>;
 // 64-bit value.
 def LoReg: OutPatFrag<(ops node:$Rs),
                       (EXTRACT_SUBREG (i64 $Rs), subreg_loreg)>;
+def HiReg: OutPatFrag<(ops node:$Rs),
+                      (EXTRACT_SUBREG (i64 $Rs), subreg_hireg)>;
 
-//===----------------------------------------------------------------------===//
+// SDNode for converting immediate C to C-1.
+def DEC_CONST_SIGNED : SDNodeXForm<imm, [{
+   int32_t imm = N->getSExtValue();
+   return XformSToSM1Imm(imm, SDLoc(N));
+}]>;
+
+// SDNode for converting immediate C to C-2.
+def DEC2_CONST_SIGNED : SDNodeXForm<imm, [{
+   int32_t imm = N->getSExtValue();
+   return XformSToSM2Imm(imm, SDLoc(N));
+}]>;
+
+// SDNode for converting immediate C to C-3.
+def DEC3_CONST_SIGNED : SDNodeXForm<imm, [{
+   int32_t imm = N->getSExtValue();
+   return XformSToSM3Imm(imm, SDLoc(N));
+}]>;
+
+// SDNode for converting immediate C to C-1.
+def DEC_CONST_UNSIGNED : SDNodeXForm<imm, [{
+   uint32_t imm = N->getZExtValue();
+   return XformUToUM1Imm(imm, SDLoc(N));
+}]>;
 
 //===----------------------------------------------------------------------===//
 // Compare
@@ -76,10 +104,16 @@
 def : T_CMP_pat ;
 
 //===----------------------------------------------------------------------===//
 // ALU32/ALU +
 //===----------------------------------------------------------------------===//
+// Add.
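
The DEC_CONST_* transforms above rewrite a matched immediate while a pattern is being selected; the arithmetic itself lives in helpers such as XformSToSM1Imm on the Hexagon ISel side, which this diff does not show. A minimal, hedged sketch of what such a helper is assumed to do (the function name decrementImm, the explicit SelectionDAG parameter, and the getTargetConstant overload used here are illustrative assumptions, not code from this patch):

  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  // Sketch only: rebuild the immediate as C-1, the value an
  // XformSToSM1Imm-style helper is expected to hand back to the pattern
  // that invoked DEC_CONST_SIGNED.
  static SDValue decrementImm(SelectionDAG &DAG, int32_t Imm, const SDLoc &DL) {
    return DAG.getTargetConstant(Imm - 1, DL, MVT::i32);
  }

DEC2_CONST_SIGNED and DEC3_CONST_SIGNED follow the same shape with C-2 and C-3, and DEC_CONST_UNSIGNED is the zero-extended equivalent.
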
+
+def SDT_Int32Leaf : SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>;
+def SDT_Int32Unary : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
+
 def SDTHexagonI64I32I32 : SDTypeProfile<1, 2, [SDTCisVT<0, i64>, SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>;
 def HexagonCOMBINE : SDNode<"HexagonISD::COMBINE", SDTHexagonI64I32I32>;
+def HexagonPACKHL : SDNode<"HexagonISD::PACKHL", SDTHexagonI64I32I32>;
 
 let hasSideEffects = 0, hasNewValue = 1, InputType = "reg" in
 class T_ALU32_3op<string mnemonic, bits<3> MajOp, bits<3> MinOp, bit OpsRev,
@@ -140,12 +174,10 @@ class T_ALU32_combineh <string Op1, string Op2, bits<3> MajOp, bits<3> MinOp,
   let AsmString = "$Rd = combine($Rs"#Op1#", $Rt"#Op2#")";
 }
 
-let isCodeGenOnly = 0 in {
 def A2_combine_hh : T_ALU32_combineh<".h", ".h", 0b011, 0b100, 1>;
 def A2_combine_hl : T_ALU32_combineh<".h", ".l", 0b011, 0b101, 1>;
 def A2_combine_lh : T_ALU32_combineh<".l", ".h", 0b011, 0b110, 1>;
 def A2_combine_ll : T_ALU32_combineh<".l", ".l", 0b011, 0b111, 1>;
-}
 
 class T_ALU32_3op_sfx <string mnemonic, string suffix, bits<3> MajOp,
                        bits<3> MinOp, bit OpsRev, bit IsComm>
@@ -153,12 +185,24 @@ class T_ALU32_3op_sfx <string mnemonic, string suffix, bits<3> MajOp,
   let AsmString = "$Rd = "#mnemonic#"($Rs, $Rt)"#suffix;
 }
 
-let Defs = [USR_OVF], Itinerary = ALU32_3op_tc_2_SLOT0123,
-    isCodeGenOnly = 0 in {
+def A2_svaddh   : T_ALU32_3op<"vaddh", 0b110, 0b000, 0, 1>;
+def A2_svsubh   : T_ALU32_3op<"vsubh", 0b110, 0b100, 1, 0>;
+
+let Defs = [USR_OVF], Itinerary = ALU32_3op_tc_2_SLOT0123 in {
+  def A2_svaddhs  : T_ALU32_3op_sfx<"vaddh",  ":sat", 0b110, 0b001, 0, 1>;
   def A2_addsat   : T_ALU32_3op_sfx<"add",    ":sat", 0b110, 0b010, 0, 1>;
+  def A2_svadduhs : T_ALU32_3op_sfx<"vadduh", ":sat", 0b110, 0b011, 0, 1>;
+  def A2_svsubhs  : T_ALU32_3op_sfx<"vsubh",  ":sat", 0b110, 0b101, 1, 0>;
   def A2_subsat   : T_ALU32_3op_sfx<"sub",    ":sat", 0b110, 0b110, 1, 0>;
+  def A2_svsubuhs : T_ALU32_3op_sfx<"vsubuh", ":sat", 0b110, 0b111, 1, 0>;
 }
 
+let Itinerary = ALU32_3op_tc_2_SLOT0123 in
+def A2_svavghs  : T_ALU32_3op_sfx<"vavgh",  ":rnd", 0b111, 0b001, 0, 1>;
+
+def A2_svavgh   : T_ALU32_3op<"vavgh",  0b111, 0b000, 0, 1>;
+def A2_svnavgh  : T_ALU32_3op<"vnavgh", 0b111, 0b011, 1, 0>;
+
 multiclass T_ALU32_3op_p <string mnemonic, bits<3> MajOp, bits<3> MinOp,
                           bit OpsRev> {
   def t : T_ALU32_3op_pred <mnemonic, MajOp, MinOp, OpsRev, 0, 0>;
@@ -174,13 +218,11 @@ multiclass T_ALU32_3op_A2 <string mnemonic, bits<3> MajOp, bits<3> MinOp,
   defm A2_p#NAME : T_ALU32_3op_p <mnemonic, MajOp, MinOp, OpsRev>;
 }
 
-let isCodeGenOnly = 0 in {
 defm add : T_ALU32_3op_A2<"add", 0b011, 0b000, 0, 1>;
 defm and : T_ALU32_3op_A2<"and", 0b001, 0b000, 0, 1>;
 defm or  : T_ALU32_3op_A2<"or",  0b001, 0b001, 0, 1>;
 defm sub : T_ALU32_3op_A2<"sub", 0b011, 0b001, 1, 0>;
 defm xor : T_ALU32_3op_A2<"xor", 0b001, 0b011, 0, 1>;
-}
 
 // Pats for instruction selection.
class BinOp32_pat @@ -194,8 +236,7 @@ def: BinOp32_pat; def: BinOp32_pat; // A few special cases producing register pairs: -let OutOperandList = (outs DoubleRegs:$Rd), hasNewValue = 0, - isCodeGenOnly = 0 in { +let OutOperandList = (outs DoubleRegs:$Rd), hasNewValue = 0 in { def S2_packhl : T_ALU32_3op <"packhl", 0b101, 0b100, 0, 0>; let isPredicable = 1 in @@ -208,6 +249,9 @@ let OutOperandList = (outs DoubleRegs:$Rd), hasNewValue = 0, def C2_ccombinewnewf : T_ALU32_3op_pred<"combine", 0b101, 0b000, 0, 1, 1>; } +def: BinOp32_pat; +def: BinOp32_pat; + let hasSideEffects = 0, hasNewValue = 1, isCompare = 1, InputType = "reg" in class T_ALU32_3op_cmp MinOp, bit IsNeg, bit IsComm> : ALU32_rr<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt), @@ -229,7 +273,7 @@ class T_ALU32_3op_cmp MinOp, bit IsNeg, bit IsComm> let Inst{1-0} = Pd; } -let Itinerary = ALU32_3op_tc_2early_SLOT0123, isCodeGenOnly = 0 in { +let Itinerary = ALU32_3op_tc_2early_SLOT0123 in { def C2_cmpeq : T_ALU32_3op_cmp< "cmp.eq", 0b00, 0, 1>; def C2_cmpgt : T_ALU32_3op_cmp< "cmp.gt", 0b10, 0, 0>; def C2_cmpgtu : T_ALU32_3op_cmp< "cmp.gtu", 0b11, 0, 0>; @@ -252,8 +296,7 @@ def: T_cmp32_rr_pat; def: T_cmp32_rr_pat, i1>; def: T_cmp32_rr_pat, i1>; -let CextOpcode = "MUX", InputType = "reg", hasNewValue = 1, - isCodeGenOnly = 0 in +let CextOpcode = "MUX", InputType = "reg", hasNewValue = 1 in def C2_mux: ALU32_rr<(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs, IntRegs:$Rt), "$Rd = mux($Pu, $Rs, $Rt)", [], "", ALU32_3op_tc_1_SLOT0123>, ImmRegRel { @@ -283,11 +326,11 @@ def: Pat<(i32 (select (i1 PredRegs:$Pu), (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))), let isReMaterializable = 1, isMoveImm = 1, isAsCheapAsAMove = 1, isExtentSigned = 1, isExtendable = 1, opExtentBits = 8, opExtendable = 1, - AddedComplexity = 75, isCodeGenOnly = 0 in + AddedComplexity = 75 in def A2_combineii: ALU32Inst <(outs DoubleRegs:$Rdd), (ins s8Ext:$s8, s8Imm:$S8), "$Rdd = combine(#$s8, #$S8)", [(set (i64 DoubleRegs:$Rdd), - (i64 (HexagonCOMBINE(i32 s8ExtPred:$s8), (i32 s8ImmPred:$S8))))]> { + (i64 (HexagonCOMBINE(i32 s32ImmPred:$s8), (i32 s8ImmPred:$S8))))]> { bits<5> Rdd; bits<8> s8; bits<8> S8; @@ -303,7 +346,7 @@ def A2_combineii: ALU32Inst <(outs DoubleRegs:$Rdd), (ins s8Ext:$s8, s8Imm:$S8), //===----------------------------------------------------------------------===// // Template class for predicated ADD of a reg and an Immediate value. //===----------------------------------------------------------------------===// -let hasNewValue = 1 in +let hasNewValue = 1, hasSideEffects = 0 in class T_Addri_Pred : ALU32_ri <(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs, s8Ext:$s8), @@ -329,13 +372,11 @@ class T_Addri_Pred //===----------------------------------------------------------------------===// // A2_addi: Add a signed immediate to a register. 
//===----------------------------------------------------------------------===// -let hasNewValue = 1 in -class T_Addri pattern = [] > +let hasNewValue = 1, hasSideEffects = 0 in +class T_Addri : ALU32_ri <(outs IntRegs:$Rd), (ins IntRegs:$Rs, immOp:$s16), - "$Rd = add($Rs, #$s16)", pattern, - //[(set (i32 IntRegs:$Rd), (add (i32 IntRegs:$Rs), (s16ExtPred:$s16)))], - "", ALU32_ADDI_tc_1_SLOT0123> { + "$Rd = add($Rs, #$s16)", [], "", ALU32_ADDI_tc_1_SLOT0123> { bits<5> Rd; bits<5> Rs; bits<16> s16; @@ -353,31 +394,29 @@ class T_Addri pattern = [] > //===----------------------------------------------------------------------===// multiclass Addri_Pred { let isPredicatedFalse = PredNot in { - def _c#NAME : T_Addri_Pred; + def NAME : T_Addri_Pred; // Predicate new - def _cdn#NAME : T_Addri_Pred; + def NAME#new : T_Addri_Pred; } } -let isExtendable = 1, InputType = "imm" in +let isExtendable = 1, isExtentSigned = 1, InputType = "imm" in multiclass Addri_base { let CextOpcode = mnemonic, BaseOpcode = mnemonic#_ri in { - let opExtendable = 2, isExtentSigned = 1, opExtentBits = 16, - isPredicable = 1 in - def NAME : T_Addri< s16Ext, // Rd=add(Rs,#s16) - [(set (i32 IntRegs:$Rd), - (add IntRegs:$Rs, s16ExtPred:$s16))]>; - - let opExtendable = 3, isExtentSigned = 1, opExtentBits = 8, - hasSideEffects = 0, isPredicated = 1 in { - defm Pt : Addri_Pred; - defm NotPt : Addri_Pred; + let opExtendable = 2, opExtentBits = 16, isPredicable = 1 in + def A2_#NAME : T_Addri; + + let opExtendable = 3, opExtentBits = 8, isPredicated = 1 in { + defm A2_p#NAME#t : Addri_Pred; + defm A2_p#NAME#f : Addri_Pred; } } } -let isCodeGenOnly = 0 in -defm ADD_ri : Addri_base<"add", add>, ImmRegRel, PredNewRel; +defm addi : Addri_base<"add", add>, ImmRegRel, PredNewRel; + +def: Pat<(i32 (add I32:$Rs, s32ImmPred:$s16)), + (i32 (A2_addi I32:$Rs, imm:$s16))>; //===----------------------------------------------------------------------===// // Template class used for the following ALU32 instructions. @@ -390,7 +429,7 @@ class T_ALU32ri_logical MinOp> : ALU32_ri <(outs IntRegs:$Rd), (ins IntRegs:$Rs, s10Ext:$s10), "$Rd = "#mnemonic#"($Rs, #$s10)" , - [(set (i32 IntRegs:$Rd), (OpNode (i32 IntRegs:$Rs), s10ExtPred:$s10))]> { + [(set (i32 IntRegs:$Rd), (OpNode (i32 IntRegs:$Rs), s32ImmPred:$s10))]> { bits<5> Rd; bits<5> Rs; bits<10> s10; @@ -406,19 +445,15 @@ class T_ALU32ri_logical MinOp> let Inst{4-0} = Rd; } -let isCodeGenOnly = 0 in { -def OR_ri : T_ALU32ri_logical<"or", or, 0b10>, ImmRegRel; -def AND_ri : T_ALU32ri_logical<"and", and, 0b00>, ImmRegRel; -} +def A2_orir : T_ALU32ri_logical<"or", or, 0b10>, ImmRegRel; +def A2_andir : T_ALU32ri_logical<"and", and, 0b00>, ImmRegRel; // Subtract register from immediate // Rd32=sub(#s10,Rs32) -let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 10, -CextOpcode = "sub", InputType = "imm", hasNewValue = 1, isCodeGenOnly = 0 in -def SUB_ri: ALU32_ri <(outs IntRegs:$Rd), (ins s10Ext:$s10, IntRegs:$Rs), - "$Rd = sub(#$s10, $Rs)" , - [(set IntRegs:$Rd, (sub s10ExtPred:$s10, IntRegs:$Rs))] > , - ImmRegRel { +let isExtendable = 1, CextOpcode = "sub", opExtendable = 1, isExtentSigned = 1, + opExtentBits = 10, InputType = "imm", hasNewValue = 1, hasSideEffects = 0 in +def A2_subri: ALU32_ri <(outs IntRegs:$Rd), (ins s10Ext:$s10, IntRegs:$Rs), + "$Rd = sub(#$s10, $Rs)", []>, ImmRegRel { bits<5> Rd; bits<10> s10; bits<5> Rs; @@ -433,14 +468,18 @@ def SUB_ri: ALU32_ri <(outs IntRegs:$Rd), (ins s10Ext:$s10, IntRegs:$Rs), } // Nop. 
-let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def A2_nop: ALU32Inst <(outs), (ins), "nop" > { let IClass = 0b0111; let Inst{27-24} = 0b1111; } + +def: Pat<(sub s32ImmPred:$s10, IntRegs:$Rs), + (A2_subri imm:$s10, IntRegs:$Rs)>; + // Rd = not(Rs) gets mapped to Rd=sub(#-1, Rs). -def : Pat<(not (i32 IntRegs:$src1)), - (SUB_ri -1, (i32 IntRegs:$src1))>; +def: Pat<(not (i32 IntRegs:$src1)), + (A2_subri -1, IntRegs:$src1)>; let hasSideEffects = 0, hasNewValue = 1 in class T_tfr16 @@ -459,10 +498,8 @@ class T_tfr16 let Inst{13-0} = u16{13-0}; } -let isCodeGenOnly = 0 in { def A2_tfril: T_tfr16<0>; def A2_tfrih: T_tfr16<1>; -} // Conditional transfer is an alias to conditional "Rd = add(Rs, #0)". let isPredicated = 1, hasNewValue = 1, opNewValue = 0 in @@ -575,20 +612,17 @@ class T_TFRI_Pred let Inst{4-0} = Rd; } -let isCodeGenOnly = 0 in { def C2_cmoveit : T_TFRI_Pred<0, 0>; def C2_cmoveif : T_TFRI_Pred<1, 0>; def C2_cmovenewit : T_TFRI_Pred<0, 1>; def C2_cmovenewif : T_TFRI_Pred<1, 1>; -} let InputType = "imm", isExtendable = 1, isExtentSigned = 1, CextOpcode = "TFR", BaseOpcode = "TFRI", hasNewValue = 1, opNewValue = 0, isAsCheapAsAMove = 1 , opExtendable = 1, opExtentBits = 16, isMoveImm = 1, - isPredicated = 0, isPredicable = 1, isReMaterializable = 1, - isCodeGenOnly = 0 in + isPredicated = 0, isPredicable = 1, isReMaterializable = 1 in def A2_tfrsi : ALU32Inst<(outs IntRegs:$Rd), (ins s16Ext:$s16), "$Rd = #$s16", - [(set (i32 IntRegs:$Rd), s16ExtPred:$s16)], "", ALU32_2op_tc_1_SLOT0123>, + [(set (i32 IntRegs:$Rd), s32ImmPred:$s16)], "", ALU32_2op_tc_1_SLOT0123>, ImmRegRel, PredRel { bits<5> Rd; bits<16> s16; @@ -599,20 +633,26 @@ def A2_tfrsi : ALU32Inst<(outs IntRegs:$Rd), (ins s16Ext:$s16), "$Rd = #$s16", let Inst{4-0} = Rd; } -let isCodeGenOnly = 0 in defm A2_tfr : tfr_base<"TFR">, ImmRegRel, PredNewRel; +let isAsmParserOnly = 1 in defm A2_tfrp : TFR64_base<"TFR64">, PredNewRel; // Assembler mapped -let isReMaterializable = 1, isMoveImm = 1, isAsCheapAsAMove = 1 in +let isReMaterializable = 1, isMoveImm = 1, isAsCheapAsAMove = 1, + isAsmParserOnly = 1 in def A2_tfrpi : ALU64_rr<(outs DoubleRegs:$dst), (ins s8Imm64:$src1), "$dst = #$src1", [(set (i64 DoubleRegs:$dst), s8Imm64Pred:$src1)]>; // TODO: see if this instruction can be deleted.. 
-let isExtendable = 1, opExtendable = 1, opExtentBits = 6 in -def TFRI64_V4 : ALU64_rr<(outs DoubleRegs:$dst), (ins u6Ext:$src1), +let isExtendable = 1, opExtendable = 1, opExtentBits = 6, + isAsmParserOnly = 1 in { +def TFRI64_V4 : ALU64_rr<(outs DoubleRegs:$dst), (ins u64Imm:$src1), "$dst = #$src1">; +def TFRI64_V2_ext : ALU64_rr<(outs DoubleRegs:$dst), + (ins s8Ext:$src1, s8Imm:$src2), + "$dst = combine(##$src1, #$src2)">; +} //===----------------------------------------------------------------------===// // ALU32/ALU - @@ -642,28 +682,28 @@ class T_MUX1 let Inst{4-0} = Rd; } -let opExtendable = 2, isCodeGenOnly = 0 in +let opExtendable = 2 in def C2_muxri : T_MUX1<0b1, (ins PredRegs:$Pu, s8Ext:$s8, IntRegs:$Rs), "$Rd = mux($Pu, #$s8, $Rs)">; -let opExtendable = 3, isCodeGenOnly = 0 in +let opExtendable = 3 in def C2_muxir : T_MUX1<0b0, (ins PredRegs:$Pu, IntRegs:$Rs, s8Ext:$s8), "$Rd = mux($Pu, $Rs, #$s8)">; -def : Pat<(i32 (select I1:$Pu, s8ExtPred:$s8, I32:$Rs)), - (C2_muxri I1:$Pu, s8ExtPred:$s8, I32:$Rs)>; +def : Pat<(i32 (select I1:$Pu, s32ImmPred:$s8, I32:$Rs)), + (C2_muxri I1:$Pu, s32ImmPred:$s8, I32:$Rs)>; -def : Pat<(i32 (select I1:$Pu, I32:$Rs, s8ExtPred:$s8)), - (C2_muxir I1:$Pu, I32:$Rs, s8ExtPred:$s8)>; +def : Pat<(i32 (select I1:$Pu, I32:$Rs, s32ImmPred:$s8)), + (C2_muxir I1:$Pu, I32:$Rs, s32ImmPred:$s8)>; // C2_muxii: Scalar mux immediates. let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1, - opExtentBits = 8, opExtendable = 2, isCodeGenOnly = 0 in + opExtentBits = 8, opExtendable = 2 in def C2_muxii: ALU32Inst <(outs IntRegs:$Rd), (ins PredRegs:$Pu, s8Ext:$s8, s8Imm:$S8), "$Rd = mux($Pu, #$s8, #$S8)" , [(set (i32 IntRegs:$Rd), - (i32 (select I1:$Pu, s8ExtPred:$s8, s8ImmPred:$S8)))] > { + (i32 (select I1:$Pu, s32ImmPred:$s8, s8ImmPred:$S8)))] > { bits<5> Rd; bits<2> Pu; bits<8> s8; @@ -679,14 +719,20 @@ def C2_muxii: ALU32Inst <(outs IntRegs:$Rd), let Inst{4-0} = Rd; } +let isCodeGenOnly = 1, isPseudo = 1 in +def MUX64_rr : ALU64_rr<(outs DoubleRegs:$Rd), + (ins PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt), + ".error \"should not emit\" ", []>; + + //===----------------------------------------------------------------------===// // template class for non-predicated alu32_2op instructions // - aslh, asrh, sxtb, sxth, zxth //===----------------------------------------------------------------------===// let hasNewValue = 1, opNewValue = 0 in class T_ALU32_2op minOp> : - ALU32Inst < (outs IntRegs:$Rd), (ins IntRegs:$Rs), - "$Rd = "#mnemonic#"($Rs)", [] > { + ALU32Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rs), + "$Rd = "#mnemonic#"($Rs)", [] > { bits<5> Rd; bits<5> Rs; @@ -703,13 +749,12 @@ class T_ALU32_2op minOp> : // template class for predicated alu32_2op instructions // - aslh, asrh, sxtb, sxth, zxtb, zxth //===----------------------------------------------------------------------===// -let hasSideEffects = 0, validSubTargets = HasV4SubT, - hasNewValue = 1, opNewValue = 0 in -class T_ALU32_2op_Pred minOp, bit isPredNot, - bit isPredNew > : - ALU32Inst <(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs), - !if(isPredNot, "if (!$Pu", "if ($Pu") - #!if(isPredNew, ".new) ",") ")#"$Rd = "#mnemonic#"($Rs)"> { +let hasSideEffects = 0, hasNewValue = 1, opNewValue = 0 in +class T_ALU32_2op_Pred minOp, bit isPredNot, + bit isPredNew > : + ALU32Inst <(outs IntRegs:$Rd), (ins PredRegs:$Pu, IntRegs:$Rs), + !if(isPredNot, "if (!$Pu", "if ($Pu") + #!if(isPredNew, ".new) ",") ")#"$Rd = "#mnemonic#"($Rs)"> { bits<5> Rd; bits<2> Pu; bits<5> Rs; @@ -741,20 +786,18 @@ multiclass 
ALU32_2op_base minOp> { let isPredicable = 1, hasSideEffects = 0 in def A2_#NAME : T_ALU32_2op; - let validSubTargets = HasV4SubT, isPredicated = 1, hasSideEffects = 0 in { + let isPredicated = 1, hasSideEffects = 0 in { defm A4_p#NAME#t : ALU32_2op_Pred; defm A4_p#NAME#f : ALU32_2op_Pred; } } } -let isCodeGenOnly = 0 in { defm aslh : ALU32_2op_base<"aslh", 0b000>, PredNewRel; defm asrh : ALU32_2op_base<"asrh", 0b001>, PredNewRel; defm sxtb : ALU32_2op_base<"sxtb", 0b101>, PredNewRel; defm sxth : ALU32_2op_base<"sxth", 0b111>, PredNewRel; defm zxth : ALU32_2op_base<"zxth", 0b110>, PredNewRel; -} // Rd=zxtb(Rs): assembler mapped to Rd=and(Rs,#255). // Compiler would want to generate 'zxtb' instead of 'and' becuase 'zxtb' has @@ -784,14 +827,13 @@ multiclass ZXTB_base minOp> { let isPredicable = 1, hasSideEffects = 0 in def A2_#NAME : T_ZXTB; - let validSubTargets = HasV4SubT, isPredicated = 1, hasSideEffects = 0 in { + let isPredicated = 1, hasSideEffects = 0 in { defm A4_p#NAME#t : ALU32_2op_Pred; defm A4_p#NAME#f : ALU32_2op_Pred; } } } -let isCodeGenOnly=0 in defm zxtb : ZXTB_base<"zxtb",0b100>, PredNewRel; def: Pat<(shl I32:$src1, (i32 16)), (A2_aslh I32:$src1)>; @@ -799,44 +841,182 @@ def: Pat<(sra I32:$src1, (i32 16)), (A2_asrh I32:$src1)>; def: Pat<(sext_inreg I32:$src1, i8), (A2_sxtb I32:$src1)>; def: Pat<(sext_inreg I32:$src1, i16), (A2_sxth I32:$src1)>; -// Mux. -def VMUX_prr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins PredRegs:$src1, - DoubleRegs:$src2, - DoubleRegs:$src3), - "$dst = vmux($src1, $src2, $src3)", - []>; +//===----------------------------------------------------------------------===// +// Template class for vector add and avg +//===----------------------------------------------------------------------===// + +class T_VectALU_64 majOp, bits<3> minOp, + bit isSat, bit isRnd, bit isCrnd, bit SwapOps > + : ALU64_rr < (outs DoubleRegs:$Rdd), + (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), + "$Rdd = "#opc#"($Rss, $Rtt)"#!if(isRnd, ":rnd", "") + #!if(isCrnd,":crnd","") + #!if(isSat, ":sat", ""), + [], "", ALU64_tc_2_SLOT23 > { + bits<5> Rdd; + bits<5> Rss; + bits<5> Rtt; + + let IClass = 0b1101; + + let Inst{27-24} = 0b0011; + let Inst{23-21} = majOp; + let Inst{20-16} = !if (SwapOps, Rtt, Rss); + let Inst{12-8} = !if (SwapOps, Rss, Rtt); + let Inst{7-5} = minOp; + let Inst{4-0} = Rdd; + } + +// ALU64 - Vector add +// Rdd=vadd[u][bhw](Rss,Rtt) +let Itinerary = ALU64_tc_1_SLOT23 in { + def A2_vaddub : T_VectALU_64 < "vaddub", 0b000, 0b000, 0, 0, 0, 0>; + def A2_vaddh : T_VectALU_64 < "vaddh", 0b000, 0b010, 0, 0, 0, 0>; + def A2_vaddw : T_VectALU_64 < "vaddw", 0b000, 0b101, 0, 0, 0, 0>; +} + +// Rdd=vadd[u][bhw](Rss,Rtt):sat +let Defs = [USR_OVF] in { + def A2_vaddubs : T_VectALU_64 < "vaddub", 0b000, 0b001, 1, 0, 0, 0>; + def A2_vaddhs : T_VectALU_64 < "vaddh", 0b000, 0b011, 1, 0, 0, 0>; + def A2_vadduhs : T_VectALU_64 < "vadduh", 0b000, 0b100, 1, 0, 0, 0>; + def A2_vaddws : T_VectALU_64 < "vaddw", 0b000, 0b110, 1, 0, 0, 0>; +} + +// ALU64 - Vector average +// Rdd=vavg[u][bhw](Rss,Rtt) +let Itinerary = ALU64_tc_1_SLOT23 in { + def A2_vavgub : T_VectALU_64 < "vavgub", 0b010, 0b000, 0, 0, 0, 0>; + def A2_vavgh : T_VectALU_64 < "vavgh", 0b010, 0b010, 0, 0, 0, 0>; + def A2_vavguh : T_VectALU_64 < "vavguh", 0b010, 0b101, 0, 0, 0, 0>; + def A2_vavgw : T_VectALU_64 < "vavgw", 0b011, 0b000, 0, 0, 0, 0>; + def A2_vavguw : T_VectALU_64 < "vavguw", 0b011, 0b011, 0, 0, 0, 0>; +} + +// Rdd=vavg[u][bhw](Rss,Rtt)[:rnd|:crnd] +def A2_vavgubr : T_VectALU_64 < "vavgub", 0b010, 0b001, 0, 1, 0, 0>; 
+def A2_vavghr : T_VectALU_64 < "vavgh", 0b010, 0b011, 0, 1, 0, 0>; +def A2_vavghcr : T_VectALU_64 < "vavgh", 0b010, 0b100, 0, 0, 1, 0>; +def A2_vavguhr : T_VectALU_64 < "vavguh", 0b010, 0b110, 0, 1, 0, 0>; + +def A2_vavgwr : T_VectALU_64 < "vavgw", 0b011, 0b001, 0, 1, 0, 0>; +def A2_vavgwcr : T_VectALU_64 < "vavgw", 0b011, 0b010, 0, 0, 1, 0>; +def A2_vavguwr : T_VectALU_64 < "vavguw", 0b011, 0b100, 0, 1, 0, 0>; + +// Rdd=vnavg[bh](Rss,Rtt) +let Itinerary = ALU64_tc_1_SLOT23 in { + def A2_vnavgh : T_VectALU_64 < "vnavgh", 0b100, 0b000, 0, 0, 0, 1>; + def A2_vnavgw : T_VectALU_64 < "vnavgw", 0b100, 0b011, 0, 0, 0, 1>; +} + +// Rdd=vnavg[bh](Rss,Rtt)[:rnd|:crnd]:sat +let Defs = [USR_OVF] in { + def A2_vnavghr : T_VectALU_64 < "vnavgh", 0b100, 0b001, 1, 1, 0, 1>; + def A2_vnavghcr : T_VectALU_64 < "vnavgh", 0b100, 0b010, 1, 0, 1, 1>; + def A2_vnavgwr : T_VectALU_64 < "vnavgw", 0b100, 0b100, 1, 1, 0, 1>; + def A2_vnavgwcr : T_VectALU_64 < "vnavgw", 0b100, 0b110, 1, 0, 1, 1>; +} + +// Rdd=vsub[u][bh](Rss,Rtt) +let Itinerary = ALU64_tc_1_SLOT23 in { + def A2_vsubub : T_VectALU_64 < "vsubub", 0b001, 0b000, 0, 0, 0, 1>; + def A2_vsubh : T_VectALU_64 < "vsubh", 0b001, 0b010, 0, 0, 0, 1>; + def A2_vsubw : T_VectALU_64 < "vsubw", 0b001, 0b101, 0, 0, 0, 1>; +} + +// Rdd=vsub[u][bh](Rss,Rtt):sat +let Defs = [USR_OVF] in { + def A2_vsububs : T_VectALU_64 < "vsubub", 0b001, 0b001, 1, 0, 0, 1>; + def A2_vsubhs : T_VectALU_64 < "vsubh", 0b001, 0b011, 1, 0, 0, 1>; + def A2_vsubuhs : T_VectALU_64 < "vsubuh", 0b001, 0b100, 1, 0, 0, 1>; + def A2_vsubws : T_VectALU_64 < "vsubw", 0b001, 0b110, 1, 0, 0, 1>; +} + +// Rdd=vmax[u][bhw](Rss,Rtt) +def A2_vmaxb : T_VectALU_64 < "vmaxb", 0b110, 0b110, 0, 0, 0, 1>; +def A2_vmaxub : T_VectALU_64 < "vmaxub", 0b110, 0b000, 0, 0, 0, 1>; +def A2_vmaxh : T_VectALU_64 < "vmaxh", 0b110, 0b001, 0, 0, 0, 1>; +def A2_vmaxuh : T_VectALU_64 < "vmaxuh", 0b110, 0b010, 0, 0, 0, 1>; +def A2_vmaxw : T_VectALU_64 < "vmaxw", 0b110, 0b011, 0, 0, 0, 1>; +def A2_vmaxuw : T_VectALU_64 < "vmaxuw", 0b101, 0b101, 0, 0, 0, 1>; +// Rdd=vmin[u][bhw](Rss,Rtt) +def A2_vminb : T_VectALU_64 < "vminb", 0b110, 0b111, 0, 0, 0, 1>; +def A2_vminub : T_VectALU_64 < "vminub", 0b101, 0b000, 0, 0, 0, 1>; +def A2_vminh : T_VectALU_64 < "vminh", 0b101, 0b001, 0, 0, 0, 1>; +def A2_vminuh : T_VectALU_64 < "vminuh", 0b101, 0b010, 0, 0, 0, 1>; +def A2_vminw : T_VectALU_64 < "vminw", 0b101, 0b011, 0, 0, 0, 1>; +def A2_vminuw : T_VectALU_64 < "vminuw", 0b101, 0b100, 0, 0, 0, 1>; //===----------------------------------------------------------------------===// -// ALU32/PERM - +// Template class for vector compare //===----------------------------------------------------------------------===// +let hasSideEffects = 0 in +class T_vcmp minOp> + : ALU64_rr <(outs PredRegs:$Pd), + (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), + "$Pd = "#Str#"($Rss, $Rtt)", [], + "", ALU64_tc_2early_SLOT23> { + bits<2> Pd; + bits<5> Rss; + bits<5> Rtt; + let IClass = 0b1101; + + let Inst{27-23} = 0b00100; + let Inst{13} = minOp{3}; + let Inst{7-5} = minOp{2-0}; + let Inst{1-0} = Pd; + let Inst{20-16} = Rss; + let Inst{12-8} = Rtt; + } + +class T_vcmp_pat + : Pat<(i1 (Op (T DoubleRegs:$Rss), (T DoubleRegs:$Rtt))), + (i1 (MI DoubleRegs:$Rss, DoubleRegs:$Rtt))>; + +// Vector compare bytes +def A2_vcmpbeq : T_vcmp <"vcmpb.eq", 0b0110>; +def A2_vcmpbgtu : T_vcmp <"vcmpb.gtu", 0b0111>; + +// Vector compare halfwords +def A2_vcmpheq : T_vcmp <"vcmph.eq", 0b0011>; +def A2_vcmphgt : T_vcmp <"vcmph.gt", 0b0100>; +def A2_vcmphgtu : T_vcmp <"vcmph.gtu", 0b0101>; 
+ +// Vector compare words +def A2_vcmpweq : T_vcmp <"vcmpw.eq", 0b0000>; +def A2_vcmpwgt : T_vcmp <"vcmpw.gt", 0b0001>; +def A2_vcmpwgtu : T_vcmp <"vcmpw.gtu", 0b0010>; + +def: T_vcmp_pat; +def: T_vcmp_pat; +def: T_vcmp_pat; +def: T_vcmp_pat; +def: T_vcmp_pat; +def: T_vcmp_pat; +def: T_vcmp_pat; +def: T_vcmp_pat; //===----------------------------------------------------------------------===// -// ALU32/PRED + +// ALU32/PERM - //===----------------------------------------------------------------------===// -// SDNode for converting immediate C to C-1. -def DEC_CONST_SIGNED : SDNodeXFormgetSExtValue(); - return XformSToSM1Imm(imm); -}]>; - -// SDNode for converting immediate C to C-1. -def DEC_CONST_UNSIGNED : SDNodeXFormgetZExtValue(); - return XformUToUM1Imm(imm); -}]>; -def CTLZ64_rr : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), - "$dst = cl0($src1)", - [(set (i32 IntRegs:$dst), (i32 (trunc (ctlz (i64 DoubleRegs:$src1)))))]>; +//===----------------------------------------------------------------------===// +// ALU32/PRED + +//===----------------------------------------------------------------------===// +// No bits needed. If cmp.ge is found the assembler parser will +// transform it to cmp.gt subtracting 1 from the immediate. +let isPseudo = 1 in { +def C2_cmpgei: ALU32Inst < + (outs PredRegs:$Pd), (ins IntRegs:$Rs, s8Ext:$s8), + "$Pd = cmp.ge($Rs, #$s8)">; +def C2_cmpgeui: ALU32Inst < + (outs PredRegs:$Pd), (ins IntRegs:$Rs, u8Ext:$s8), + "$Pd = cmp.geu($Rs, #$s8)">; +} -def CTTZ64_rr : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), - "$dst = ct0($src1)", - [(set (i32 IntRegs:$dst), (i32 (trunc (cttz (i64 DoubleRegs:$src1)))))]>; //===----------------------------------------------------------------------===// // ALU32/PRED - @@ -845,7 +1025,8 @@ def CTTZ64_rr : SInst<(outs IntRegs:$dst), (ins DoubleRegs:$src1), //===----------------------------------------------------------------------===// // ALU64/ALU + -//===----------------------------------------------------------------------===//// Add. +//===----------------------------------------------------------------------===// +// Add. 
//===----------------------------------------------------------------------===// // Template Class // Add/Subtract halfword @@ -879,18 +1060,14 @@ class T_XTYPE_ADD_SUB LHbits, bit isSat, bit hasShift, bit isSub> } //Rd=sub(Rt.L,Rs.[LH]) -let isCodeGenOnly = 0 in { def A2_subh_l16_ll : T_XTYPE_ADD_SUB <0b00, 0, 0, 1>; def A2_subh_l16_hl : T_XTYPE_ADD_SUB <0b10, 0, 0, 1>; -} -let isCodeGenOnly = 0 in { //Rd=add(Rt.L,Rs.[LH]) def A2_addh_l16_ll : T_XTYPE_ADD_SUB <0b00, 0, 0, 0>; def A2_addh_l16_hl : T_XTYPE_ADD_SUB <0b10, 0, 0, 0>; -} -let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF], isCodeGenOnly = 0 in { +let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF] in { //Rd=sub(Rt.L,Rs.[LH]):sat def A2_subh_l16_sat_ll : T_XTYPE_ADD_SUB <0b00, 1, 0, 1>; def A2_subh_l16_sat_hl : T_XTYPE_ADD_SUB <0b10, 1, 0, 1>; @@ -901,22 +1078,18 @@ let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF], isCodeGenOnly = 0 in { } //Rd=sub(Rt.[LH],Rs.[LH]):<<16 -let isCodeGenOnly = 0 in { def A2_subh_h16_ll : T_XTYPE_ADD_SUB <0b00, 0, 1, 1>; def A2_subh_h16_lh : T_XTYPE_ADD_SUB <0b01, 0, 1, 1>; def A2_subh_h16_hl : T_XTYPE_ADD_SUB <0b10, 0, 1, 1>; def A2_subh_h16_hh : T_XTYPE_ADD_SUB <0b11, 0, 1, 1>; -} //Rd=add(Rt.[LH],Rs.[LH]):<<16 -let isCodeGenOnly = 0 in { def A2_addh_h16_ll : T_XTYPE_ADD_SUB <0b00, 0, 1, 0>; def A2_addh_h16_lh : T_XTYPE_ADD_SUB <0b01, 0, 1, 0>; def A2_addh_h16_hl : T_XTYPE_ADD_SUB <0b10, 0, 1, 0>; def A2_addh_h16_hh : T_XTYPE_ADD_SUB <0b11, 0, 1, 0>; -} -let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF], isCodeGenOnly = 0 in { +let Itinerary = ALU64_tc_2_SLOT23, Defs = [USR_OVF] in { //Rd=sub(Rt.[LH],Rs.[LH]):sat:<<16 def A2_subh_h16_sat_ll : T_XTYPE_ADD_SUB <0b00, 1, 1, 1>; def A2_subh_h16_sat_lh : T_XTYPE_ADD_SUB <0b01, 1, 1, 1>; @@ -947,7 +1120,7 @@ def: Pat<(sext_inreg (sub I32:$src1, I32:$src2), i16), def: Pat<(shl (sub I32:$src1, I32:$src2), (i32 16)), (A2_subh_h16_ll I32:$src1, I32:$src2)>; -let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in +let hasSideEffects = 0, hasNewValue = 1 in def S2_parityp: ALU64Inst<(outs IntRegs:$Rd), (ins DoubleRegs:$Rs, DoubleRegs:$Rt), "$Rd = parity($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> { @@ -981,12 +1154,10 @@ class T_XTYPE_MIN_MAX < bit isMax, bit isUnsigned > let Inst{20-16} = !if(isMax, Rt, Rs); } -let isCodeGenOnly = 0 in { def A2_min : T_XTYPE_MIN_MAX < 0, 0 >; def A2_minu : T_XTYPE_MIN_MAX < 0, 1 >; def A2_max : T_XTYPE_MIN_MAX < 1, 0 >; def A2_maxu : T_XTYPE_MIN_MAX < 1, 1 >; -} // Here, depending on the operand being selected, we'll either generate a // min or max instruction. 
@@ -1053,11 +1224,9 @@ class T_cmp64_rr MinOp, bit IsComm> let Inst{1-0} = Pd; } -let isCodeGenOnly = 0 in { def C2_cmpeqp : T_cmp64_rr<"cmp.eq", 0b000, 1>; def C2_cmpgtp : T_cmp64_rr<"cmp.gt", 0b010, 0>; def C2_cmpgtup : T_cmp64_rr<"cmp.gtu", 0b100, 0>; -} class T_cmp64_rr_pat : Pat<(i1 (CmpOp (i64 DoubleRegs:$Rs), (i64 DoubleRegs:$Rt))), @@ -1069,6 +1238,24 @@ def: T_cmp64_rr_pat; def: T_cmp64_rr_pat>; def: T_cmp64_rr_pat>; +def C2_vmux : ALU64_rr<(outs DoubleRegs:$Rd), + (ins PredRegs:$Pu, DoubleRegs:$Rs, DoubleRegs:$Rt), + "$Rd = vmux($Pu, $Rs, $Rt)", [], "", ALU64_tc_1_SLOT23> { + let hasSideEffects = 0; + + bits<5> Rd; + bits<2> Pu; + bits<5> Rs; + bits<5> Rt; + + let IClass = 0b1101; + let Inst{27-24} = 0b0001; + let Inst{20-16} = Rs; + let Inst{12-8} = Rt; + let Inst{6-5} = Pu; + let Inst{4-0} = Rd; +} + class T_ALU64_rr RegType, bits<3> MajOp, bits<3> MinOp, bit OpsRev, bit IsComm, string Op2Pfx> @@ -1096,10 +1283,8 @@ class T_ALU64_arith MajOp, bits<3> MinOp, bit IsSat, : T_ALU64_rr; -let isCodeGenOnly = 0 in { def A2_addp : T_ALU64_arith<"add", 0b000, 0b111, 0, 0, 1>; def A2_subp : T_ALU64_arith<"sub", 0b001, 0b111, 0, 1, 0>; -} def: Pat<(i64 (add I64:$Rs, I64:$Rt)), (A2_addp I64:$Rs, I64:$Rt)>; def: Pat<(i64 (sub I64:$Rs, I64:$Rt)), (A2_subp I64:$Rs, I64:$Rt)>; @@ -1109,11 +1294,9 @@ class T_ALU64_logical MinOp, bit OpsRev, bit IsComm, : T_ALU64_rr; -let isCodeGenOnly = 0 in { def A2_andp : T_ALU64_logical<"and", 0b000, 0, 1, 0>; def A2_orp : T_ALU64_logical<"or", 0b010, 0, 1, 0>; def A2_xorp : T_ALU64_logical<"xor", 0b100, 0, 1, 0>; -} def: Pat<(i64 (and I64:$Rs, I64:$Rt)), (A2_andp I64:$Rs, I64:$Rt)>; def: Pat<(i64 (or I64:$Rs, I64:$Rt)), (A2_orp I64:$Rs, I64:$Rt)>; @@ -1165,11 +1348,9 @@ class T_LOGICAL_1OP OpBits> let Inst{1-0} = Pd; } -let isCodeGenOnly = 0 in { def C2_any8 : T_LOGICAL_1OP<"any8", 0b00>; def C2_all8 : T_LOGICAL_1OP<"all8", 0b01>; def C2_not : T_LOGICAL_1OP<"not", 0b10>; -} def: Pat<(i1 (not (i1 PredRegs:$Ps))), (C2_not PredRegs:$Ps)>; @@ -1193,13 +1374,11 @@ class T_LOGICAL_2OP OpBits, bit IsNeg, bit Rev> let Inst{1-0} = Pd; } -let isCodeGenOnly = 0 in { def C2_and : T_LOGICAL_2OP<"and", 0b000, 0, 1>; def C2_or : T_LOGICAL_2OP<"or", 0b001, 0, 1>; def C2_xor : T_LOGICAL_2OP<"xor", 0b010, 0, 0>; def C2_andn : T_LOGICAL_2OP<"and", 0b011, 1, 1>; def C2_orn : T_LOGICAL_2OP<"or", 0b111, 1, 1>; -} def: Pat<(i1 (and I1:$Ps, I1:$Pt)), (C2_and I1:$Ps, I1:$Pt)>; def: Pat<(i1 (or I1:$Ps, I1:$Pt)), (C2_or I1:$Ps, I1:$Pt)>; @@ -1207,7 +1386,7 @@ def: Pat<(i1 (xor I1:$Ps, I1:$Pt)), (C2_xor I1:$Ps, I1:$Pt)>; def: Pat<(i1 (and I1:$Ps, (not I1:$Pt))), (C2_andn I1:$Ps, I1:$Pt)>; def: Pat<(i1 (or I1:$Ps, (not I1:$Pt))), (C2_orn I1:$Ps, I1:$Pt)>; -let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in +let hasSideEffects = 0, hasNewValue = 1 in def C2_vitpack : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps, PredRegs:$Pt), "$Rd = vitpack($Ps, $Pt)", [], "", S_2op_tc_1_SLOT23> { bits<5> Rd; @@ -1222,7 +1401,7 @@ def C2_vitpack : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps, PredRegs:$Pt), let Inst{4-0} = Rd; } -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def C2_mask : SInst<(outs DoubleRegs:$Rd), (ins PredRegs:$Pt), "$Rd = mask($Pt)", [], "", S_2op_tc_1_SLOT23> { bits<5> Rd; @@ -1234,18 +1413,6 @@ def C2_mask : SInst<(outs DoubleRegs:$Rd), (ins PredRegs:$Pt), let Inst{4-0} = Rd; } -def VALIGN_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, - DoubleRegs:$src2, - PredRegs:$src3), - "$dst = valignb($src1, $src2, $src3)", - []>; - -def 
VSPLICE_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, - DoubleRegs:$src2, - PredRegs:$src3), - "$dst = vspliceb($src1, $src2, $src3)", - []>; - // User control register transfer. //===----------------------------------------------------------------------===// // CR - @@ -1256,7 +1423,7 @@ def VSPLICE_rrp : SInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, //===----------------------------------------------------------------------===// def retflag : SDNode<"HexagonISD::RET_FLAG", SDTNone, - [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; def eh_return: SDNode<"HexagonISD::EH_RETURN", SDTNone, [SDNPHasChain]>; def SDHexagonBR_JT: SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; @@ -1266,7 +1433,7 @@ class CondStr { string S = "if (" # !if(True,"","!") # CReg # !if(New,".new","") # ") "; } class JumpOpcStr { - string S = Mnemonic # !if(New, !if(Taken,":t",":nt"), ""); + string S = Mnemonic # !if(Taken, ":t", !if(New, ":nt", "")); } let isBranch = 1, isBarrier = 1, Defs = [PC], hasSideEffects = 0, @@ -1304,7 +1471,7 @@ class T_JMP_c let Inst{27-24} = 0b1100; let Inst{21} = PredNot; - let Inst{12} = !if(isPredNew, isTak, zero); + let Inst{12} = isTak; let Inst{11} = isPredNew; let Inst{9-8} = src; let Inst{23-22} = dst{16-15}; @@ -1314,7 +1481,7 @@ class T_JMP_c } multiclass JMP_Pred { - def NAME : T_JMP_c; + def NAME : T_JMP_c; // not taken // Predicate new def NAME#newpt : T_JMP_c; // taken def NAME#new : T_JMP_c; // not taken @@ -1361,13 +1528,13 @@ class T_JMPr_c let Inst{27-22} = 0b001101; let Inst{21} = PredNot; let Inst{20-16} = dst; - let Inst{12} = !if(isPredNew, isTak, zero); + let Inst{12} = isTak; let Inst{11} = isPredNew; let Inst{9-8} = src; } multiclass JMPR_Pred { - def NAME: T_JMPr_c; + def NAME : T_JMPr_c; // not taken // Predicate new def NAME#newpt : T_JMPr_c; // taken def NAME#new : T_JMPr_c; // not taken @@ -1404,12 +1571,12 @@ class JUMPR_MISC_CALLR; def J2_callrf : JUMPR_MISC_CALLR<1, 1, (ins PredRegs:$Pu, IntRegs:$Rs)>; } -let isTerminator = 1, hasSideEffects = 0, isCodeGenOnly = 0 in { +let isTerminator = 1, hasSideEffects = 0 in { defm J2_jump : JMP_base<"JMP", "">, PredNewRel; // Deal with explicit assembly @@ -1451,6 +1618,8 @@ def: Pat<(brind (i32 IntRegs:$dst)), //===----------------------------------------------------------------------===// // LD + //===----------------------------------------------------------------------===// + +// Load - Base with Immediate offset addressing mode let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, AddedComplexity = 20 in class T_load_io MajOp, Operand ImmOp> @@ -1471,7 +1640,7 @@ class T_load_io MajOp, !if (!eq(ImmOpStr, "s11_2Ext"), 13, !if (!eq(ImmOpStr, "s11_1Ext"), 12, /* s11_0Ext */ 11))); - let hasNewValue = !if (!eq(ImmOpStr, "s11_3Ext"), 0, 1); + let hasNewValue = !if (!eq(!cast(RC), "DoubleRegs"), 0, 1); let IClass = 0b1001; @@ -1542,276 +1711,620 @@ multiclass LD_Idxd; defm loadrub: LD_Idxd <"memub", "LDriub", IntRegs, s11_0Ext, u6_0Ext, 0b1001>; } -let accessSize = HalfWordAccess, opExtentAlign = 1, isCodeGenOnly = 0 in { +let accessSize = HalfWordAccess, opExtentAlign = 1 in { defm loadrh: LD_Idxd <"memh", "LDrih", IntRegs, s11_1Ext, u6_1Ext, 0b1010>; defm loadruh: LD_Idxd <"memuh", "LDriuh", IntRegs, s11_1Ext, u6_1Ext, 0b1011>; } -/// -// Load -- MEMri operand -multiclass LD_MEMri_Pbase { - let isPredicatedNew = isPredNew in - def NAME : LDInst2<(outs RC:$dst), - (ins PredRegs:$src1, MEMri:$addr), - !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, 
".new) ", - ") ")#"$dst = "#mnemonic#"($addr)", - []>; -} - -multiclass LD_MEMri_Pred { - let isPredicatedFalse = PredNot in { - defm _c#NAME : LD_MEMri_Pbase; - // Predicate new - defm _cdn#NAME : LD_MEMri_Pbase; - } -} +let accessSize = WordAccess, opExtentAlign = 2 in +defm loadri: LD_Idxd <"memw", "LDriw", IntRegs, s11_2Ext, u6_2Ext, 0b1100>; -let isExtendable = 1, hasSideEffects = 0 in -multiclass LD_MEMri ImmBits, bits<5> PredImmBits> { +let accessSize = DoubleWordAccess, opExtentAlign = 3 in +defm loadrd: LD_Idxd <"memd", "LDrid", DoubleRegs, s11_3Ext, u6_3Ext, 0b1110>; - let CextOpcode = CextOp, BaseOpcode = CextOp in { - let opExtendable = 2, isExtentSigned = 1, opExtentBits = ImmBits, - isPredicable = 1 in - def NAME : LDInst2<(outs RC:$dst), (ins MEMri:$addr), - "$dst = "#mnemonic#"($addr)", - []>; - - let opExtendable = 3, isExtentSigned = 0, opExtentBits = PredImmBits, - isPredicated = 1 in { - defm Pt : LD_MEMri_Pred; - defm NotPt : LD_MEMri_Pred; - } - } +let accessSize = HalfWordAccess, opExtentAlign = 1 in { + def L2_loadbsw2_io: T_load_io<"membh", IntRegs, 0b0001, s11_1Ext>; + def L2_loadbzw2_io: T_load_io<"memubh", IntRegs, 0b0011, s11_1Ext>; } -let addrMode = BaseImmOffset, isMEMri = "true" in { - let accessSize = WordAccess in - defm LDriw: LD_MEMri < "memw", "LDriw", IntRegs, 13, 8>, AddrModeRel; - - let accessSize = DoubleWordAccess in - defm LDrid: LD_MEMri < "memd", "LDrid", DoubleRegs, 14, 9>, AddrModeRel; +let accessSize = WordAccess, opExtentAlign = 2 in { + def L2_loadbzw4_io: T_load_io<"memubh", DoubleRegs, 0b0101, s11_2Ext>; + def L2_loadbsw4_io: T_load_io<"membh", DoubleRegs, 0b0111, s11_2Ext>; } -def : Pat < (i32 (sextloadi8 ADDRriS11_0:$addr)), - (L2_loadrb_io AddrFI:$addr, 0) >; - -def : Pat < (i32 (zextloadi8 ADDRriS11_0:$addr)), - (L2_loadrub_io AddrFI:$addr, 0) >; - -def : Pat < (i32 (sextloadi16 ADDRriS11_1:$addr)), - (L2_loadrh_io AddrFI:$addr, 0) >; - -def : Pat < (i32 (zextloadi16 ADDRriS11_1:$addr)), - (L2_loadruh_io AddrFI:$addr, 0) >; - -def : Pat < (i32 (load ADDRriS11_2:$addr)), - (LDriw ADDRriS11_2:$addr) >; - -def : Pat < (i64 (load ADDRriS11_3:$addr)), - (LDrid ADDRriS11_3:$addr) >; +let addrMode = BaseImmOffset, isExtendable = 1, hasSideEffects = 0, + opExtendable = 3, isExtentSigned = 1 in +class T_loadalign_io MajOp, Operand ImmOp> + : LDInst<(outs DoubleRegs:$dst), + (ins DoubleRegs:$src1, IntRegs:$src2, ImmOp:$offset), + "$dst = "#str#"($src2 + #$offset)", [], + "$src1 = $dst">, AddrModeRel { + bits<4> name; + bits<5> dst; + bits<5> src2; + bits<12> offset; + bits<11> offsetBits; + let offsetBits = !if (!eq(!cast(ImmOp), "s11_1Ext"), offset{11-1}, + /* s11_0Ext */ offset{10-0}); + let IClass = 0b1001; -// Load - Base with Immediate offset addressing mode -multiclass LD_Idxd_Pbase2 { - let isPredicatedNew = isPredNew in - def NAME : LDInst2<(outs RC:$dst), - (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3), - !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#"$dst = "#mnemonic#"($src2+#$src3)", - []>; -} - -multiclass LD_Idxd_Pred2 { - let isPredicatedFalse = PredNot in { - defm _c#NAME : LD_Idxd_Pbase2; - // Predicate new - defm _cdn#NAME : LD_Idxd_Pbase2; + let Inst{27} = 0b0; + let Inst{26-25} = offsetBits{10-9}; + let Inst{24-21} = MajOp; + let Inst{20-16} = src2; + let Inst{13-5} = offsetBits{8-0}; + let Inst{4-0} = dst; } -} - -let isExtendable = 1, hasSideEffects = 0 in -multiclass LD_Idxd2 ImmBits, - bits<5> PredImmBits> { - let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in { - let opExtendable = 
2, isExtentSigned = 1, opExtentBits = ImmBits, - isPredicable = 1, AddedComplexity = 20 in - def NAME : LDInst2<(outs RC:$dst), (ins IntRegs:$src1, ImmOp:$offset), - "$dst = "#mnemonic#"($src1+#$offset)", - []>; - - let opExtendable = 3, isExtentSigned = 0, opExtentBits = PredImmBits, - isPredicated = 1 in { - defm Pt : LD_Idxd_Pred2; - defm NotPt : LD_Idxd_Pred2; - } - } -} +let accessSize = HalfWordAccess, opExtentBits = 12, opExtentAlign = 1 in +def L2_loadalignh_io: T_loadalign_io <"memh_fifo", 0b0010, s11_1Ext>; -let addrMode = BaseImmOffset in { - let accessSize = WordAccess in - defm LDriw_indexed: LD_Idxd2 <"memw", "LDriw", IntRegs, s11_2Ext, u6_2Ext, - 13, 8>, AddrModeRel; +let accessSize = ByteAccess, opExtentBits = 11 in +def L2_loadalignb_io: T_loadalign_io <"memb_fifo", 0b0100, s11_0Ext>; - let accessSize = DoubleWordAccess in - defm LDrid_indexed: LD_Idxd2 <"memd", "LDrid", DoubleRegs, s11_3Ext, u6_3Ext, - 14, 9>, AddrModeRel; +// Patterns to select load-indexed (i.e. load from base+offset). +multiclass Loadx_pat { + def: Pat<(VT (Load AddrFI:$fi)), (VT (MI AddrFI:$fi, 0))>; + def: Pat<(VT (Load (add (i32 AddrFI:$fi), ImmPred:$Off))), + (VT (MI AddrFI:$fi, imm:$Off))>; + def: Pat<(VT (Load (add (i32 IntRegs:$Rs), ImmPred:$Off))), + (VT (MI IntRegs:$Rs, imm:$Off))>; + def: Pat<(VT (Load (i32 IntRegs:$Rs))), (VT (MI IntRegs:$Rs, 0))>; } let AddedComplexity = 20 in { -def : Pat < (i32 (sextloadi8 (add IntRegs:$src1, s11_0ExtPred:$offset))), - (L2_loadrb_io IntRegs:$src1, s11_0ExtPred:$offset) >; - -def : Pat < (i32 (zextloadi8 (add IntRegs:$src1, s11_0ExtPred:$offset))), - (L2_loadrub_io IntRegs:$src1, s11_0ExtPred:$offset) >; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + defm: Loadx_pat; + // No sextloadi1. +} + +// Sign-extending loads of i1 need to replicate the lowest bit throughout +// the 32-bit value. Since the loaded value can only be 0 or 1, 0-v should +// do the trick. +let AddedComplexity = 20 in +def: Pat<(i32 (sextloadi1 (i32 IntRegs:$Rs))), + (A2_subri 0, (L2_loadrub_io IntRegs:$Rs, 0))>; -def : Pat < (i32 (sextloadi16 (add IntRegs:$src1, s11_1ExtPred:$offset))), - (L2_loadrh_io IntRegs:$src1, s11_1ExtPred:$offset) >; +//===----------------------------------------------------------------------===// +// Post increment load +//===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// Template class for non-predicated post increment loads with immediate offset. 
+//===----------------------------------------------------------------------===// +let hasSideEffects = 0, addrMode = PostInc in +class T_load_pi MajOp > + : LDInstPI <(outs RC:$dst, IntRegs:$dst2), + (ins IntRegs:$src1, ImmOp:$offset), + "$dst = "#mnemonic#"($src1++#$offset)" , + [], + "$src1 = $dst2" > , + PredNewRel { + bits<5> dst; + bits<5> src1; + bits<7> offset; + bits<4> offsetBits; -def : Pat < (i32 (zextloadi16 (add IntRegs:$src1, s11_1ExtPred:$offset))), - (L2_loadruh_io IntRegs:$src1, s11_1ExtPred:$offset) >; + string ImmOpStr = !cast(ImmOp); + let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3}, + !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2}, + !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1}, + /* s4_0Imm */ offset{3-0}))); + let hasNewValue = !if (!eq(ImmOpStr, "s4_3Imm"), 0, 1); -def : Pat < (i32 (load (add IntRegs:$src1, s11_2ExtPred:$offset))), - (LDriw_indexed IntRegs:$src1, s11_2ExtPred:$offset) >; + let IClass = 0b1001; -def : Pat < (i64 (load (add IntRegs:$src1, s11_3ExtPred:$offset))), - (LDrid_indexed IntRegs:$src1, s11_3ExtPred:$offset) >; -} + let Inst{27-25} = 0b101; + let Inst{24-21} = MajOp; + let Inst{20-16} = src1; + let Inst{13-12} = 0b00; + let Inst{8-5} = offsetBits; + let Inst{4-0} = dst; + } //===----------------------------------------------------------------------===// -// Post increment load +// Template class for predicated post increment loads with immediate offset. //===----------------------------------------------------------------------===// +let isPredicated = 1, hasSideEffects = 0, addrMode = PostInc in +class T_pload_pi MajOp, bit isPredNot, bit isPredNew > + : LDInst <(outs RC:$dst, IntRegs:$dst2), + (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset), + !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", + ") ")#"$dst = "#mnemonic#"($src2++#$offset)", + [] , + "$src2 = $dst2" > , + PredNewRel { + bits<5> dst; + bits<2> src1; + bits<5> src2; + bits<7> offset; + bits<4> offsetBits; -multiclass LD_PostInc_Pbase { - let isPredicatedNew = isPredNew in - def NAME : LDInst2PI<(outs RC:$dst, IntRegs:$dst2), - (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset), - !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#"$dst = "#mnemonic#"($src2++#$offset)", - [], - "$src2 = $dst2">; -} + let isPredicatedNew = isPredNew; + let isPredicatedFalse = isPredNot; -multiclass LD_PostInc_Pred { - let isPredicatedFalse = PredNot in { - defm _c#NAME : LD_PostInc_Pbase; - // Predicate new - let Predicates = [HasV4T], validSubTargets = HasV4SubT in - defm _cdn#NAME#_V4 : LD_PostInc_Pbase; + string ImmOpStr = !cast(ImmOp); + let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3}, + !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2}, + !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1}, + /* s4_0Imm */ offset{3-0}))); + let hasNewValue = !if (!eq(ImmOpStr, "s4_3Imm"), 0, 1); + + let IClass = 0b1001; + + let Inst{27-25} = 0b101; + let Inst{24-21} = MajOp; + let Inst{20-16} = src2; + let Inst{13} = 0b1; + let Inst{12} = isPredNew; + let Inst{11} = isPredNot; + let Inst{10-9} = src1; + let Inst{8-5} = offsetBits; + let Inst{4-0} = dst; } -} -multiclass LD_PostInc { +//===----------------------------------------------------------------------===// +// Multiclass for post increment loads with immediate offset. 
+//===----------------------------------------------------------------------===// +multiclass LD_PostInc MajOp> { let BaseOpcode = "POST_"#BaseOp in { let isPredicable = 1 in - def NAME : LDInst2PI<(outs RC:$dst, IntRegs:$dst2), - (ins IntRegs:$src1, ImmOp:$offset), - "$dst = "#mnemonic#"($src1++#$offset)", - [], - "$src1 = $dst2">; - - let isPredicated = 1 in { - defm Pt : LD_PostInc_Pred; - defm NotPt : LD_PostInc_Pred; - } + def L2_#NAME#_pi : T_load_pi < mnemonic, RC, ImmOp, MajOp>; + + // Predicated + def L2_p#NAME#t_pi : T_pload_pi < mnemonic, RC, ImmOp, MajOp, 0, 0>; + def L2_p#NAME#f_pi : T_pload_pi < mnemonic, RC, ImmOp, MajOp, 1, 0>; + + // Predicated new + def L2_p#NAME#tnew_pi : T_pload_pi < mnemonic, RC, ImmOp, MajOp, 0, 1>; + def L2_p#NAME#fnew_pi : T_pload_pi < mnemonic, RC, ImmOp, MajOp, 1, 1>; } } -let hasCtrlDep = 1, hasSideEffects = 0, addrMode = PostInc in { - defm POST_LDrib : LD_PostInc<"memb", "LDrib", IntRegs, s4_0Imm>, - PredNewRel; - defm POST_LDriub : LD_PostInc<"memub", "LDriub", IntRegs, s4_0Imm>, - PredNewRel; - defm POST_LDrih : LD_PostInc<"memh", "LDrih", IntRegs, s4_1Imm>, - PredNewRel; - defm POST_LDriuh : LD_PostInc<"memuh", "LDriuh", IntRegs, s4_1Imm>, - PredNewRel; - defm POST_LDriw : LD_PostInc<"memw", "LDriw", IntRegs, s4_2Imm>, - PredNewRel; - defm POST_LDrid : LD_PostInc<"memd", "LDrid", DoubleRegs, s4_3Imm>, - PredNewRel; +// post increment byte loads with immediate offset +let accessSize = ByteAccess in { + defm loadrb : LD_PostInc <"memb", "LDrib", IntRegs, s4_0Imm, 0b1000>; + defm loadrub : LD_PostInc <"memub", "LDriub", IntRegs, s4_0Imm, 0b1001>; } -def : Pat< (i32 (extloadi1 ADDRriS11_0:$addr)), - (i32 (L2_loadrb_io AddrFI:$addr, 0)) >; - -// Load byte any-extend. -def : Pat < (i32 (extloadi8 ADDRriS11_0:$addr)), - (i32 (L2_loadrb_io AddrFI:$addr, 0)) >; - -// Indexed load byte any-extend. -let AddedComplexity = 20 in -def : Pat < (i32 (extloadi8 (add IntRegs:$src1, s11_0ImmPred:$offset))), - (i32 (L2_loadrb_io IntRegs:$src1, s11_0ImmPred:$offset)) >; - -def : Pat < (i32 (extloadi16 ADDRriS11_1:$addr)), - (i32 (L2_loadrh_io AddrFI:$addr, 0))>; - -let AddedComplexity = 20 in -def : Pat < (i32 (extloadi16 (add IntRegs:$src1, s11_1ImmPred:$offset))), - (i32 (L2_loadrh_io IntRegs:$src1, s11_1ImmPred:$offset)) >; +// post increment halfword loads with immediate offset +let accessSize = HalfWordAccess, opExtentAlign = 1 in { + defm loadrh : LD_PostInc <"memh", "LDrih", IntRegs, s4_1Imm, 0b1010>; + defm loadruh : LD_PostInc <"memuh", "LDriuh", IntRegs, s4_1Imm, 0b1011>; +} -let AddedComplexity = 10 in -def : Pat < (i32 (zextloadi1 ADDRriS11_0:$addr)), - (i32 (L2_loadrub_io AddrFI:$addr, 0))>; +// post increment word loads with immediate offset +let accessSize = WordAccess, opExtentAlign = 2 in +defm loadri : LD_PostInc <"memw", "LDriw", IntRegs, s4_2Imm, 0b1100>; -let AddedComplexity = 20 in -def : Pat < (i32 (zextloadi1 (add IntRegs:$src1, s11_0ImmPred:$offset))), - (i32 (L2_loadrub_io IntRegs:$src1, s11_0ImmPred:$offset))>; +// post increment doubleword loads with immediate offset +let accessSize = DoubleWordAccess, opExtentAlign = 3 in +defm loadrd : LD_PostInc <"memd", "LDrid", DoubleRegs, s4_3Imm, 0b1110>; -// Load predicate. -let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13, -isPseudo = 1, Defs = [R10,R11,D5], hasSideEffects = 0 in -def LDriw_pred : LDInst2<(outs PredRegs:$dst), - (ins MEMri:$addr), - "Error; should not emit", - []>; - -// Deallocate stack frame. 
-let Defs = [R29, R30, R31], Uses = [R29], hasSideEffects = 0 in { - def DEALLOCFRAME : LDInst2<(outs), (ins), - "deallocframe", - []>; +// Rd=memb[u]h(Rx++#s4:1) +// Rdd=memb[u]h(Rx++#s4:2) +let accessSize = HalfWordAccess, opExtentAlign = 1 in { + def L2_loadbsw2_pi : T_load_pi <"membh", IntRegs, s4_1Imm, 0b0001>; + def L2_loadbzw2_pi : T_load_pi <"memubh", IntRegs, s4_1Imm, 0b0011>; +} +let accessSize = WordAccess, opExtentAlign = 2, hasNewValue = 0 in { + def L2_loadbsw4_pi : T_load_pi <"membh", DoubleRegs, s4_2Imm, 0b0111>; + def L2_loadbzw4_pi : T_load_pi <"memubh", DoubleRegs, s4_2Imm, 0b0101>; } -// Load and unpack bytes to halfwords. //===----------------------------------------------------------------------===// -// LD - +// Template class for post increment fifo loads with immediate offset. //===----------------------------------------------------------------------===// +let hasSideEffects = 0, addrMode = PostInc in +class T_loadalign_pi MajOp > + : LDInstPI <(outs DoubleRegs:$dst, IntRegs:$dst2), + (ins DoubleRegs:$src1, IntRegs:$src2, ImmOp:$offset), + "$dst = "#mnemonic#"($src2++#$offset)" , + [], "$src2 = $dst2, $src1 = $dst" > , + PredNewRel { + bits<5> dst; + bits<5> src2; + bits<5> offset; + bits<4> offsetBits; -//===----------------------------------------------------------------------===// -// MTYPE/ALU + -//===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// MTYPE/ALU - -//===----------------------------------------------------------------------===// + let offsetBits = !if (!eq(!cast(ImmOp), "s4_1Imm"), offset{4-1}, + /* s4_0Imm */ offset{3-0}); + let IClass = 0b1001; -//===----------------------------------------------------------------------===// -// MTYPE/COMPLEX + -//===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// MTYPE/COMPLEX - -//===----------------------------------------------------------------------===// + let Inst{27-25} = 0b101; + let Inst{24-21} = MajOp; + let Inst{20-16} = src2; + let Inst{13-12} = 0b00; + let Inst{8-5} = offsetBits; + let Inst{4-0} = dst; + } + +// Ryy=memh_fifo(Rx++#s4:1) +// Ryy=memb_fifo(Rx++#s4:0) +let accessSize = ByteAccess in +def L2_loadalignb_pi : T_loadalign_pi <"memb_fifo", s4_0Imm, 0b0100>; + +let accessSize = HalfWordAccess, opExtentAlign = 1 in +def L2_loadalignh_pi : T_loadalign_pi <"memh_fifo", s4_1Imm, 0b0010>; //===----------------------------------------------------------------------===// -// MTYPE/MPYH + +// Template class for post increment loads with register offset. 
//===----------------------------------------------------------------------===// +let hasSideEffects = 0, addrMode = PostInc in +class T_load_pr MajOp, + MemAccessSize AccessSz> + : LDInstPI <(outs RC:$dst, IntRegs:$_dst_), + (ins IntRegs:$src1, ModRegs:$src2), + "$dst = "#mnemonic#"($src1++$src2)" , + [], "$src1 = $_dst_" > { + bits<5> dst; + bits<5> src1; + bits<1> src2; -//===----------------------------------------------------------------------===// + let accessSize = AccessSz; + let IClass = 0b1001; + + let Inst{27-25} = 0b110; + let Inst{24-21} = MajOp; + let Inst{20-16} = src1; + let Inst{13} = src2; + let Inst{12} = 0b0; + let Inst{7} = 0b0; + let Inst{4-0} = dst; + } + +let hasNewValue = 1 in { + def L2_loadrb_pr : T_load_pr <"memb", IntRegs, 0b1000, ByteAccess>; + def L2_loadrub_pr : T_load_pr <"memub", IntRegs, 0b1001, ByteAccess>; + def L2_loadrh_pr : T_load_pr <"memh", IntRegs, 0b1010, HalfWordAccess>; + def L2_loadruh_pr : T_load_pr <"memuh", IntRegs, 0b1011, HalfWordAccess>; + def L2_loadri_pr : T_load_pr <"memw", IntRegs, 0b1100, WordAccess>; + + def L2_loadbzw2_pr : T_load_pr <"memubh", IntRegs, 0b0011, HalfWordAccess>; +} + +def L2_loadrd_pr : T_load_pr <"memd", DoubleRegs, 0b1110, DoubleWordAccess>; +def L2_loadbzw4_pr : T_load_pr <"memubh", DoubleRegs, 0b0101, WordAccess>; + +// Load predicate. +let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 13, + isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in +def LDriw_pred : LDInst<(outs PredRegs:$dst), + (ins IntRegs:$addr, s11_2Ext:$off), + ".error \"should not emit\"", []>; + +let Defs = [R29, R30, R31], Uses = [R30], hasSideEffects = 0 in + def L2_deallocframe : LDInst<(outs), (ins), + "deallocframe", + []> { + let IClass = 0b1001; + + let Inst{27-16} = 0b000000011110; + let Inst{13} = 0b0; + let Inst{4-0} = 0b11110; +} + +// Load / Post increment circular addressing mode. +let Uses = [CS], hasSideEffects = 0 in +class T_load_pcr MajOp> + : LDInst <(outs RC:$dst, IntRegs:$_dst_), + (ins IntRegs:$Rz, ModRegs:$Mu), + "$dst = "#mnemonic#"($Rz ++ I:circ($Mu))", [], + "$Rz = $_dst_" > { + bits<5> dst; + bits<5> Rz; + bit Mu; + + let hasNewValue = !if (!eq(!cast(RC), "DoubleRegs"), 0, 1); + let IClass = 0b1001; + + let Inst{27-25} = 0b100; + let Inst{24-21} = MajOp; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12} = 0b0; + let Inst{9} = 0b1; + let Inst{7} = 0b0; + let Inst{4-0} = dst; + } + +let accessSize = ByteAccess in { + def L2_loadrb_pcr : T_load_pcr <"memb", IntRegs, 0b1000>; + def L2_loadrub_pcr : T_load_pcr <"memub", IntRegs, 0b1001>; +} + +let accessSize = HalfWordAccess in { + def L2_loadrh_pcr : T_load_pcr <"memh", IntRegs, 0b1010>; + def L2_loadruh_pcr : T_load_pcr <"memuh", IntRegs, 0b1011>; + def L2_loadbsw2_pcr : T_load_pcr <"membh", IntRegs, 0b0001>; + def L2_loadbzw2_pcr : T_load_pcr <"memubh", IntRegs, 0b0011>; +} + +let accessSize = WordAccess in { + def L2_loadri_pcr : T_load_pcr <"memw", IntRegs, 0b1100>; + let hasNewValue = 0 in { + def L2_loadbzw4_pcr : T_load_pcr <"memubh", DoubleRegs, 0b0101>; + def L2_loadbsw4_pcr : T_load_pcr <"membh", DoubleRegs, 0b0111>; + } +} + +let accessSize = DoubleWordAccess in +def L2_loadrd_pcr : T_load_pcr <"memd", DoubleRegs, 0b1110>; + +// Load / Post increment circular addressing mode. 
+let Uses = [CS], hasSideEffects = 0 in +class T_loadalign_pcr MajOp, MemAccessSize AccessSz > + : LDInst <(outs DoubleRegs:$dst, IntRegs:$_dst_), + (ins DoubleRegs:$_src_, IntRegs:$Rz, ModRegs:$Mu), + "$dst = "#mnemonic#"($Rz ++ I:circ($Mu))", [], + "$Rz = $_dst_, $dst = $_src_" > { + bits<5> dst; + bits<5> Rz; + bit Mu; + + let accessSize = AccessSz; + let IClass = 0b1001; + + let Inst{27-25} = 0b100; + let Inst{24-21} = MajOp; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12} = 0b0; + let Inst{9} = 0b1; + let Inst{7} = 0b0; + let Inst{4-0} = dst; + } + +def L2_loadalignb_pcr : T_loadalign_pcr <"memb_fifo", 0b0100, ByteAccess>; +def L2_loadalignh_pcr : T_loadalign_pcr <"memh_fifo", 0b0010, HalfWordAccess>; + +//===----------------------------------------------------------------------===// +// Circular loads with immediate offset. +//===----------------------------------------------------------------------===// +let Uses = [CS], mayLoad = 1, hasSideEffects = 0 in +class T_load_pci MajOp> + : LDInstPI<(outs RC:$dst, IntRegs:$_dst_), + (ins IntRegs:$Rz, ImmOp:$offset, ModRegs:$Mu), + "$dst = "#mnemonic#"($Rz ++ #$offset:circ($Mu))", [], + "$Rz = $_dst_"> { + bits<5> dst; + bits<5> Rz; + bits<1> Mu; + bits<7> offset; + bits<4> offsetBits; + + string ImmOpStr = !cast(ImmOp); + let hasNewValue = !if (!eq(!cast(RC), "DoubleRegs"), 0, 1); + let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3}, + !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2}, + !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1}, + /* s4_0Imm */ offset{3-0}))); + let IClass = 0b1001; + let Inst{27-25} = 0b100; + let Inst{24-21} = MajOp; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12} = 0b0; + let Inst{9} = 0b0; + let Inst{8-5} = offsetBits; + let Inst{4-0} = dst; + } + +// Byte variants of circ load +let accessSize = ByteAccess in { + def L2_loadrb_pci : T_load_pci <"memb", IntRegs, s4_0Imm, 0b1000>; + def L2_loadrub_pci : T_load_pci <"memub", IntRegs, s4_0Imm, 0b1001>; +} + +// Half word variants of circ load +let accessSize = HalfWordAccess in { + def L2_loadrh_pci : T_load_pci <"memh", IntRegs, s4_1Imm, 0b1010>; + def L2_loadruh_pci : T_load_pci <"memuh", IntRegs, s4_1Imm, 0b1011>; + def L2_loadbzw2_pci : T_load_pci <"memubh", IntRegs, s4_1Imm, 0b0011>; + def L2_loadbsw2_pci : T_load_pci <"membh", IntRegs, s4_1Imm, 0b0001>; +} + +// Word variants of circ load +let accessSize = WordAccess in +def L2_loadri_pci : T_load_pci <"memw", IntRegs, s4_2Imm, 0b1100>; + +let accessSize = WordAccess, hasNewValue = 0 in { + def L2_loadbzw4_pci : T_load_pci <"memubh", DoubleRegs, s4_2Imm, 0b0101>; + def L2_loadbsw4_pci : T_load_pci <"membh", DoubleRegs, s4_2Imm, 0b0111>; +} + +let accessSize = DoubleWordAccess, hasNewValue = 0 in +def L2_loadrd_pci : T_load_pci <"memd", DoubleRegs, s4_3Imm, 0b1110>; + +//===----------------------------------------------------------------------===// +// Circular loads - Pseudo +// +// Please note that the input operand order in the pseudo instructions +// doesn't match with the real instructions. Pseudo instructions operand +// order should mimics the ordering in the intrinsics. Also, 'src2' doesn't +// appear in the AsmString because it's same as 'dst'. 
+//===----------------------------------------------------------------------===//
+let isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0, isPseudo = 1 in
+class T_load_pci_pseudo <string opc, RegisterClass RC>
+  : LDInstPI<(outs IntRegs:$_dst_, RC:$dst),
+             (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4Imm:$src4),
+  ".error \"$dst = "#opc#"($src1++#$src4:circ($src3))\"",
+  [], "$src1 = $_dst_">;
+
+def L2_loadrb_pci_pseudo : T_load_pci_pseudo <"memb", IntRegs>;
+def L2_loadrub_pci_pseudo : T_load_pci_pseudo <"memub", IntRegs>;
+def L2_loadrh_pci_pseudo : T_load_pci_pseudo <"memh", IntRegs>;
+def L2_loadruh_pci_pseudo : T_load_pci_pseudo <"memuh", IntRegs>;
+def L2_loadri_pci_pseudo : T_load_pci_pseudo <"memw", IntRegs>;
+def L2_loadrd_pci_pseudo : T_load_pci_pseudo <"memd", DoubleRegs>;
+
+
+// TODO: memb_fifo and memh_fifo must take destination register as input.
+// One-off circ loads - not enough in common to break into a class.
+let accessSize = ByteAccess in
+def L2_loadalignb_pci : T_load_pci <"memb_fifo", DoubleRegs, s4_0Imm, 0b0100>;
+
+let accessSize = HalfWordAccess, opExtentAlign = 1 in
+def L2_loadalignh_pci : T_load_pci <"memh_fifo", DoubleRegs, s4_1Imm, 0b0010>;
+
+// L[24]_load[wd]_locked: Load word/double with lock.
+let isSoloAX = 1 in
+class T_load_locked <string mnemonic, RegisterClass RC>
+  : LD0Inst <(outs RC:$dst),
+             (ins IntRegs:$src),
+    "$dst = "#mnemonic#"($src)"> {
+    bits<5> dst;
+    bits<5> src;
+    let IClass = 0b1001;
+    let Inst{27-21} = 0b0010000;
+    let Inst{20-16} = src;
+    let Inst{13-12} = !if (!eq(mnemonic, "memd_locked"), 0b01, 0b00);
+    let Inst{5} = 0;
+    let Inst{4-0} = dst;
+}
+let hasNewValue = 1, accessSize = WordAccess, opNewValue = 0 in
+  def L2_loadw_locked : T_load_locked <"memw_locked", IntRegs>;
+let accessSize = DoubleWordAccess in
+  def L4_loadd_locked : T_load_locked <"memd_locked", DoubleRegs>;
+
+// S[24]_store[wd]_locked: Store word/double conditionally.
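L2_loadw_locked and the store-conditional defined next (S2_storew_locked) form the usual load-locked/store-conditional pair: the load sets a reservation, and the conditional store writes its predicate destination with whether that reservation still held. A sketch of the retry loop such a pair supports is shown here; hexagon_load_locked and hexagon_store_conditional are hypothetical stand-ins for the two instructions, not real intrinsics.

#include <cstdint>

// Hypothetical wrappers standing in for L2_loadw_locked
// ("Rd = memw_locked(Rs)") and S2_storew_locked ("memw_locked(Rs, Pd) = Rt").
uint32_t hexagon_load_locked(volatile uint32_t *Addr);
bool hexagon_store_conditional(volatile uint32_t *Addr, uint32_t Val);

// Atomic fetch-and-add built from the pair: retry until the conditional
// store reports that the reservation survived.
uint32_t atomicFetchAdd(volatile uint32_t *Addr, uint32_t N) {
  uint32_t Old;
  do {
    Old = hexagon_load_locked(Addr);
  } while (!hexagon_store_conditional(Addr, Old + N));
  return Old;
}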
+let isSoloAX = 1, isPredicateLate = 1 in +class T_store_locked + : ST0Inst <(outs PredRegs:$Pd), (ins IntRegs:$Rs, RC:$Rt), + mnemonic#"($Rs, $Pd) = $Rt"> { + bits<2> Pd; + bits<5> Rs; + bits<5> Rt; + + let IClass = 0b1010; + let Inst{27-23} = 0b00001; + let Inst{22} = !if (!eq(mnemonic, "memw_locked"), 0b0, 0b1); + let Inst{21} = 0b1; + let Inst{20-16} = Rs; + let Inst{12-8} = Rt; + let Inst{1-0} = Pd; +} + +let accessSize = WordAccess in +def S2_storew_locked : T_store_locked <"memw_locked", IntRegs>; + +let accessSize = DoubleWordAccess in +def S4_stored_locked : T_store_locked <"memd_locked", DoubleRegs>; + +//===----------------------------------------------------------------------===// +// Bit-reversed loads with auto-increment register +//===----------------------------------------------------------------------===// +let hasSideEffects = 0 in +class T_load_pbr majOp> + : LDInst + <(outs RC:$dst, IntRegs:$_dst_), + (ins IntRegs:$Rz, ModRegs:$Mu), + "$dst = "#mnemonic#"($Rz ++ $Mu:brev)" , + [] , "$Rz = $_dst_" > { + + let accessSize = addrSize; + + bits<5> dst; + bits<5> Rz; + bits<1> Mu; + + let IClass = 0b1001; + + let Inst{27-25} = 0b111; + let Inst{24-21} = majOp; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12} = 0b0; + let Inst{7} = 0b0; + let Inst{4-0} = dst; + } + +let hasNewValue =1, opNewValue = 0 in { + def L2_loadrb_pbr : T_load_pbr <"memb", IntRegs, ByteAccess, 0b1000>; + def L2_loadrub_pbr : T_load_pbr <"memub", IntRegs, ByteAccess, 0b1001>; + def L2_loadrh_pbr : T_load_pbr <"memh", IntRegs, HalfWordAccess, 0b1010>; + def L2_loadruh_pbr : T_load_pbr <"memuh", IntRegs, HalfWordAccess, 0b1011>; + def L2_loadbsw2_pbr : T_load_pbr <"membh", IntRegs, HalfWordAccess, 0b0001>; + def L2_loadbzw2_pbr : T_load_pbr <"memubh", IntRegs, HalfWordAccess, 0b0011>; + def L2_loadri_pbr : T_load_pbr <"memw", IntRegs, WordAccess, 0b1100>; +} + +def L2_loadbzw4_pbr : T_load_pbr <"memubh", DoubleRegs, WordAccess, 0b0101>; +def L2_loadbsw4_pbr : T_load_pbr <"membh", DoubleRegs, WordAccess, 0b0111>; +def L2_loadrd_pbr : T_load_pbr <"memd", DoubleRegs, DoubleWordAccess, 0b1110>; + +def L2_loadalignb_pbr :T_load_pbr <"memb_fifo", DoubleRegs, ByteAccess, 0b0100>; +def L2_loadalignh_pbr :T_load_pbr <"memh_fifo", DoubleRegs, + HalfWordAccess, 0b0010>; + +//===----------------------------------------------------------------------===// +// Bit-reversed loads - Pseudo +// +// Please note that 'src2' doesn't appear in the AsmString because +// it's same as 'dst'. 
+//===----------------------------------------------------------------------===// +let isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0, isPseudo = 1 in +class T_load_pbr_pseudo + : LDInstPI<(outs IntRegs:$_dst_, RC:$dst), + (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), + ".error \"$dst = "#opc#"($src1++$src3:brev)\"", + [], "$src1 = $_dst_">; + +def L2_loadrb_pbr_pseudo : T_load_pbr_pseudo <"memb", IntRegs>; +def L2_loadrub_pbr_pseudo : T_load_pbr_pseudo <"memub", IntRegs>; +def L2_loadrh_pbr_pseudo : T_load_pbr_pseudo <"memh", IntRegs>; +def L2_loadruh_pbr_pseudo : T_load_pbr_pseudo <"memuh", IntRegs>; +def L2_loadri_pbr_pseudo : T_load_pbr_pseudo <"memw", IntRegs>; +def L2_loadrd_pbr_pseudo : T_load_pbr_pseudo <"memd", DoubleRegs>; + +//===----------------------------------------------------------------------===// +// LD - +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// MTYPE/ALU + +//===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// MTYPE/ALU - +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// MTYPE/COMPLEX + +//===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// MTYPE/COMPLEX - +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// MTYPE/MPYH + +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// // Template Class // MPYS / Multipy signed/unsigned halfwords //Rd=mpy[u](Rs.[H|L],Rt.[H|L])[:<<1][:rnd][:sat] @@ -1845,7 +2358,6 @@ class T_M2_mpy < bits<2> LHbits, bit isSat, bit isRnd, } //Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1] -let isCodeGenOnly = 0 in { def M2_mpy_ll_s1: T_M2_mpy<0b00, 0, 0, 1, 0>; def M2_mpy_ll_s0: T_M2_mpy<0b00, 0, 0, 0, 0>; def M2_mpy_lh_s1: T_M2_mpy<0b01, 0, 0, 1, 0>; @@ -1854,10 +2366,8 @@ def M2_mpy_hl_s1: T_M2_mpy<0b10, 0, 0, 1, 0>; def M2_mpy_hl_s0: T_M2_mpy<0b10, 0, 0, 0, 0>; def M2_mpy_hh_s1: T_M2_mpy<0b11, 0, 0, 1, 0>; def M2_mpy_hh_s0: T_M2_mpy<0b11, 0, 0, 0, 0>; -} //Rd=mpyu(Rs.[H|L],Rt.[H|L])[:<<1] -let isCodeGenOnly = 0 in { def M2_mpyu_ll_s1: T_M2_mpy<0b00, 0, 0, 1, 1>; def M2_mpyu_ll_s0: T_M2_mpy<0b00, 0, 0, 0, 1>; def M2_mpyu_lh_s1: T_M2_mpy<0b01, 0, 0, 1, 1>; @@ -1866,10 +2376,8 @@ def M2_mpyu_hl_s1: T_M2_mpy<0b10, 0, 0, 1, 1>; def M2_mpyu_hl_s0: T_M2_mpy<0b10, 0, 0, 0, 1>; def M2_mpyu_hh_s1: T_M2_mpy<0b11, 0, 0, 1, 1>; def M2_mpyu_hh_s0: T_M2_mpy<0b11, 0, 0, 0, 1>; -} //Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1]:rnd -let isCodeGenOnly = 0 in { def M2_mpy_rnd_ll_s1: T_M2_mpy <0b00, 0, 1, 1, 0>; def M2_mpy_rnd_ll_s0: T_M2_mpy <0b00, 0, 1, 0, 0>; def M2_mpy_rnd_lh_s1: T_M2_mpy <0b01, 0, 1, 1, 0>; @@ -1878,11 +2386,10 @@ def M2_mpy_rnd_hl_s1: T_M2_mpy <0b10, 0, 1, 1, 0>; def M2_mpy_rnd_hl_s0: T_M2_mpy <0b10, 0, 1, 0, 0>; def M2_mpy_rnd_hh_s1: T_M2_mpy <0b11, 0, 1, 1, 0>; def M2_mpy_rnd_hh_s0: T_M2_mpy <0b11, 0, 1, 0, 0>; -} //Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1][:sat] //Rd=mpy(Rs.[H|L],Rt.[H|L])[:<<1][:rnd][:sat] -let Defs = [USR_OVF], isCodeGenOnly = 0 in { +let Defs = [USR_OVF] in { 
def M2_mpy_sat_ll_s1: T_M2_mpy <0b00, 1, 0, 1, 0>; def M2_mpy_sat_ll_s0: T_M2_mpy <0b00, 1, 0, 0, 0>; def M2_mpy_sat_lh_s1: T_M2_mpy <0b01, 1, 0, 1, 0>; @@ -1936,7 +2443,6 @@ class T_M2_mpy_acc < bits<2> LHbits, bit isSat, bit isNac, } //Rx += mpy(Rs.[H|L],Rt.[H|L])[:<<1] -let isCodeGenOnly = 0 in { def M2_mpy_acc_ll_s1: T_M2_mpy_acc <0b00, 0, 0, 1, 0>; def M2_mpy_acc_ll_s0: T_M2_mpy_acc <0b00, 0, 0, 0, 0>; def M2_mpy_acc_lh_s1: T_M2_mpy_acc <0b01, 0, 0, 1, 0>; @@ -1945,10 +2451,8 @@ def M2_mpy_acc_hl_s1: T_M2_mpy_acc <0b10, 0, 0, 1, 0>; def M2_mpy_acc_hl_s0: T_M2_mpy_acc <0b10, 0, 0, 0, 0>; def M2_mpy_acc_hh_s1: T_M2_mpy_acc <0b11, 0, 0, 1, 0>; def M2_mpy_acc_hh_s0: T_M2_mpy_acc <0b11, 0, 0, 0, 0>; -} //Rx += mpyu(Rs.[H|L],Rt.[H|L])[:<<1] -let isCodeGenOnly = 0 in { def M2_mpyu_acc_ll_s1: T_M2_mpy_acc <0b00, 0, 0, 1, 1>; def M2_mpyu_acc_ll_s0: T_M2_mpy_acc <0b00, 0, 0, 0, 1>; def M2_mpyu_acc_lh_s1: T_M2_mpy_acc <0b01, 0, 0, 1, 1>; @@ -1957,10 +2461,8 @@ def M2_mpyu_acc_hl_s1: T_M2_mpy_acc <0b10, 0, 0, 1, 1>; def M2_mpyu_acc_hl_s0: T_M2_mpy_acc <0b10, 0, 0, 0, 1>; def M2_mpyu_acc_hh_s1: T_M2_mpy_acc <0b11, 0, 0, 1, 1>; def M2_mpyu_acc_hh_s0: T_M2_mpy_acc <0b11, 0, 0, 0, 1>; -} //Rx -= mpy(Rs.[H|L],Rt.[H|L])[:<<1] -let isCodeGenOnly = 0 in { def M2_mpy_nac_ll_s1: T_M2_mpy_acc <0b00, 0, 1, 1, 0>; def M2_mpy_nac_ll_s0: T_M2_mpy_acc <0b00, 0, 1, 0, 0>; def M2_mpy_nac_lh_s1: T_M2_mpy_acc <0b01, 0, 1, 1, 0>; @@ -1969,10 +2471,8 @@ def M2_mpy_nac_hl_s1: T_M2_mpy_acc <0b10, 0, 1, 1, 0>; def M2_mpy_nac_hl_s0: T_M2_mpy_acc <0b10, 0, 1, 0, 0>; def M2_mpy_nac_hh_s1: T_M2_mpy_acc <0b11, 0, 1, 1, 0>; def M2_mpy_nac_hh_s0: T_M2_mpy_acc <0b11, 0, 1, 0, 0>; -} //Rx -= mpyu(Rs.[H|L],Rt.[H|L])[:<<1] -let isCodeGenOnly = 0 in { def M2_mpyu_nac_ll_s1: T_M2_mpy_acc <0b00, 0, 1, 1, 1>; def M2_mpyu_nac_ll_s0: T_M2_mpy_acc <0b00, 0, 1, 0, 1>; def M2_mpyu_nac_lh_s1: T_M2_mpy_acc <0b01, 0, 1, 1, 1>; @@ -1981,10 +2481,8 @@ def M2_mpyu_nac_hl_s1: T_M2_mpy_acc <0b10, 0, 1, 1, 1>; def M2_mpyu_nac_hl_s0: T_M2_mpy_acc <0b10, 0, 1, 0, 1>; def M2_mpyu_nac_hh_s1: T_M2_mpy_acc <0b11, 0, 1, 1, 1>; def M2_mpyu_nac_hh_s0: T_M2_mpy_acc <0b11, 0, 1, 0, 1>; -} //Rx += mpy(Rs.[H|L],Rt.[H|L])[:<<1]:sat -let isCodeGenOnly = 0 in { def M2_mpy_acc_sat_ll_s1: T_M2_mpy_acc <0b00, 1, 0, 1, 0>; def M2_mpy_acc_sat_ll_s0: T_M2_mpy_acc <0b00, 1, 0, 0, 0>; def M2_mpy_acc_sat_lh_s1: T_M2_mpy_acc <0b01, 1, 0, 1, 0>; @@ -1993,10 +2491,8 @@ def M2_mpy_acc_sat_hl_s1: T_M2_mpy_acc <0b10, 1, 0, 1, 0>; def M2_mpy_acc_sat_hl_s0: T_M2_mpy_acc <0b10, 1, 0, 0, 0>; def M2_mpy_acc_sat_hh_s1: T_M2_mpy_acc <0b11, 1, 0, 1, 0>; def M2_mpy_acc_sat_hh_s0: T_M2_mpy_acc <0b11, 1, 0, 0, 0>; -} //Rx -= mpy(Rs.[H|L],Rt.[H|L])[:<<1]:sat -let isCodeGenOnly = 0 in { def M2_mpy_nac_sat_ll_s1: T_M2_mpy_acc <0b00, 1, 1, 1, 0>; def M2_mpy_nac_sat_ll_s0: T_M2_mpy_acc <0b00, 1, 1, 0, 0>; def M2_mpy_nac_sat_lh_s1: T_M2_mpy_acc <0b01, 1, 1, 1, 0>; @@ -2005,7 +2501,6 @@ def M2_mpy_nac_sat_hl_s1: T_M2_mpy_acc <0b10, 1, 1, 1, 0>; def M2_mpy_nac_sat_hl_s0: T_M2_mpy_acc <0b10, 1, 1, 0, 0>; def M2_mpy_nac_sat_hh_s1: T_M2_mpy_acc <0b11, 1, 1, 1, 0>; def M2_mpy_nac_sat_hh_s0: T_M2_mpy_acc <0b11, 1, 1, 0, 0>; -} //===----------------------------------------------------------------------===// // Template Class @@ -2039,7 +2534,6 @@ class T_M2_mpyd_acc < bits<2> LHbits, bit isNac, bit hasShift, bit isUnsigned> let Inst{12-8} = Rt; } -let isCodeGenOnly = 0 in { def M2_mpyd_acc_hh_s0: T_M2_mpyd_acc <0b11, 0, 0, 0>; def M2_mpyd_acc_hl_s0: T_M2_mpyd_acc <0b10, 0, 0, 0>; def M2_mpyd_acc_lh_s0: 
T_M2_mpyd_acc <0b01, 0, 0, 0>; @@ -2079,6 +2573,72 @@ def M2_mpyud_nac_hh_s1: T_M2_mpyd_acc <0b11, 1, 1, 1>; def M2_mpyud_nac_hl_s1: T_M2_mpyd_acc <0b10, 1, 1, 1>; def M2_mpyud_nac_lh_s1: T_M2_mpyd_acc <0b01, 1, 1, 1>; def M2_mpyud_nac_ll_s1: T_M2_mpyd_acc <0b00, 1, 1, 1>; + +//===----------------------------------------------------------------------===// +// Template Class -- Vector Multipy +// Used for complex multiply real or imaginary, dual multiply and even halfwords +//===----------------------------------------------------------------------===// +class T_M2_vmpy < string opc, bits<3> MajOp, bits<3> MinOp, bit hasShift, + bit isRnd, bit isSat > + : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), + "$Rdd = "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","") + #!if(isRnd,":rnd","") + #!if(isSat,":sat",""), + [] > { + bits<5> Rdd; + bits<5> Rss; + bits<5> Rtt; + + let IClass = 0b1110; + + let Inst{27-24} = 0b1000; + let Inst{23-21} = MajOp; + let Inst{7-5} = MinOp; + let Inst{4-0} = Rdd; + let Inst{20-16} = Rss; + let Inst{12-8} = Rtt; + } + +// Vector complex multiply imaginary: Rdd=vcmpyi(Rss,Rtt)[:<<1]:sat +let Defs = [USR_OVF] in { +def M2_vcmpy_s1_sat_i: T_M2_vmpy <"vcmpyi", 0b110, 0b110, 1, 0, 1>; +def M2_vcmpy_s0_sat_i: T_M2_vmpy <"vcmpyi", 0b010, 0b110, 0, 0, 1>; + +// Vector complex multiply real: Rdd=vcmpyr(Rss,Rtt)[:<<1]:sat +def M2_vcmpy_s1_sat_r: T_M2_vmpy <"vcmpyr", 0b101, 0b110, 1, 0, 1>; +def M2_vcmpy_s0_sat_r: T_M2_vmpy <"vcmpyr", 0b001, 0b110, 0, 0, 1>; + +// Vector dual multiply: Rdd=vdmpy(Rss,Rtt)[:<<1]:sat +def M2_vdmpys_s1: T_M2_vmpy <"vdmpy", 0b100, 0b100, 1, 0, 1>; +def M2_vdmpys_s0: T_M2_vmpy <"vdmpy", 0b000, 0b100, 0, 0, 1>; + +// Vector multiply even halfwords: Rdd=vmpyeh(Rss,Rtt)[:<<1]:sat +def M2_vmpy2es_s1: T_M2_vmpy <"vmpyeh", 0b100, 0b110, 1, 0, 1>; +def M2_vmpy2es_s0: T_M2_vmpy <"vmpyeh", 0b000, 0b110, 0, 0, 1>; + +//Rdd=vmpywoh(Rss,Rtt)[:<<1][:rnd]:sat +def M2_mmpyh_s0: T_M2_vmpy <"vmpywoh", 0b000, 0b111, 0, 0, 1>; +def M2_mmpyh_s1: T_M2_vmpy <"vmpywoh", 0b100, 0b111, 1, 0, 1>; +def M2_mmpyh_rs0: T_M2_vmpy <"vmpywoh", 0b001, 0b111, 0, 1, 1>; +def M2_mmpyh_rs1: T_M2_vmpy <"vmpywoh", 0b101, 0b111, 1, 1, 1>; + +//Rdd=vmpyweh(Rss,Rtt)[:<<1][:rnd]:sat +def M2_mmpyl_s0: T_M2_vmpy <"vmpyweh", 0b000, 0b101, 0, 0, 1>; +def M2_mmpyl_s1: T_M2_vmpy <"vmpyweh", 0b100, 0b101, 1, 0, 1>; +def M2_mmpyl_rs0: T_M2_vmpy <"vmpyweh", 0b001, 0b101, 0, 1, 1>; +def M2_mmpyl_rs1: T_M2_vmpy <"vmpyweh", 0b101, 0b101, 1, 1, 1>; + +//Rdd=vmpywouh(Rss,Rtt)[:<<1][:rnd]:sat +def M2_mmpyuh_s0: T_M2_vmpy <"vmpywouh", 0b010, 0b111, 0, 0, 1>; +def M2_mmpyuh_s1: T_M2_vmpy <"vmpywouh", 0b110, 0b111, 1, 0, 1>; +def M2_mmpyuh_rs0: T_M2_vmpy <"vmpywouh", 0b011, 0b111, 0, 1, 1>; +def M2_mmpyuh_rs1: T_M2_vmpy <"vmpywouh", 0b111, 0b111, 1, 1, 1>; + +//Rdd=vmpyweuh(Rss,Rtt)[:<<1][:rnd]:sat +def M2_mmpyul_s0: T_M2_vmpy <"vmpyweuh", 0b010, 0b101, 0, 0, 1>; +def M2_mmpyul_s1: T_M2_vmpy <"vmpyweuh", 0b110, 0b101, 1, 0, 1>; +def M2_mmpyul_rs0: T_M2_vmpy <"vmpyweuh", 0b011, 0b101, 0, 1, 1>; +def M2_mmpyul_rs1: T_M2_vmpy <"vmpyweuh", 0b111, 0b101, 1, 1, 1>; } let hasNewValue = 1, opNewValue = 0 in @@ -2107,6 +2667,9 @@ class T_MType_mpy RegTyBits, RegisterClass RC, let Inst{4-0} = dst; } +class T_MType_vrcmpy MajOp, bits<3> MinOp, bit isHi> + : T_MType_mpy ; + class T_MType_dd MajOp, bits<3> MinOp, bit isSat = 0, bit isRnd = 0 > : T_MType_mpy ; @@ -2119,30 +2682,37 @@ class T_MType_rr2 MajOp, bits<3> MinOp, bit isSat = 0, bit isRnd = 0, string op2str = "" > : T_MType_mpy; -let CextOpcode = 
"mpyi", InputType = "reg", isCodeGenOnly = 0 in +def M2_vradduh : T_MType_dd <"vradduh", 0b000, 0b001, 0, 0>; +def M2_vdmpyrs_s0 : T_MType_dd <"vdmpy", 0b000, 0b000, 1, 1>; +def M2_vdmpyrs_s1 : T_MType_dd <"vdmpy", 0b100, 0b000, 1, 1>; + +let CextOpcode = "mpyi", InputType = "reg" in def M2_mpyi : T_MType_rr1 <"mpyi", 0b000, 0b000>, ImmRegRel; -let isCodeGenOnly = 0 in { def M2_mpy_up : T_MType_rr1 <"mpy", 0b000, 0b001>; def M2_mpyu_up : T_MType_rr1 <"mpyu", 0b010, 0b001>; -} -let isCodeGenOnly = 0 in def M2_dpmpyss_rnd_s0 : T_MType_rr1 <"mpy", 0b001, 0b001, 0, 1>; -let isCodeGenOnly = 0 in { +def M2_vmpy2s_s0pack : T_MType_rr1 <"vmpyh", 0b001, 0b111, 1, 1>; +def M2_vmpy2s_s1pack : T_MType_rr1 <"vmpyh", 0b101, 0b111, 1, 1>; + def M2_hmmpyh_rs1 : T_MType_rr2 <"mpy", 0b101, 0b100, 1, 1, ".h">; def M2_hmmpyl_rs1 : T_MType_rr2 <"mpy", 0b111, 0b100, 1, 1, ".l">; -} + +def M2_cmpyrs_s0 : T_MType_rr2 <"cmpy", 0b001, 0b110, 1, 1>; +def M2_cmpyrs_s1 : T_MType_rr2 <"cmpy", 0b101, 0b110, 1, 1>; +def M2_cmpyrsc_s0 : T_MType_rr2 <"cmpy", 0b011, 0b110, 1, 1, "*">; +def M2_cmpyrsc_s1 : T_MType_rr2 <"cmpy", 0b111, 0b110, 1, 1, "*">; // V4 Instructions -let isCodeGenOnly = 0 in { +def M2_vraddh : T_MType_dd <"vraddh", 0b001, 0b111, 0>; def M2_mpysu_up : T_MType_rr1 <"mpysu", 0b011, 0b001, 0>; +def M2_mpy_up_s1 : T_MType_rr1 <"mpy", 0b101, 0b010, 0>; def M2_mpy_up_s1_sat : T_MType_rr1 <"mpy", 0b111, 0b000, 1>; def M2_hmmpyh_s1 : T_MType_rr2 <"mpy", 0b101, 0b000, 1, 0, ".h">; def M2_hmmpyl_s1 : T_MType_rr2 <"mpy", 0b101, 0b001, 1, 0, ".l">; -} def: Pat<(i32 (mul I32:$src1, I32:$src2)), (M2_mpyi I32:$src1, I32:$src2)>; def: Pat<(i32 (mulhs I32:$src1, I32:$src2)), (M2_mpy_up I32:$src1, I32:$src2)>; @@ -2167,11 +2737,10 @@ class T_MType_mpy_ri pattern> let Inst{12-5} = u8; } -let isExtendable = 1, opExtentBits = 8, opExtendable = 2, isCodeGenOnly = 0 in +let isExtendable = 1, opExtentBits = 8, opExtendable = 2 in def M2_mpysip : T_MType_mpy_ri <0, u8Ext, - [(set (i32 IntRegs:$Rd), (mul IntRegs:$Rs, u8ExtPred:$u8))]>; + [(set (i32 IntRegs:$Rd), (mul IntRegs:$Rs, u32ImmPred:$u8))]>; -let isCodeGenOnly = 0 in def M2_mpysin : T_MType_mpy_ri <1, u8Imm, [(set (i32 IntRegs:$Rd), (ineg (mul IntRegs:$Rs, u8ImmPred:$u8)))]>; @@ -2187,11 +2756,12 @@ def M2_mpyui : MInst<(outs IntRegs:$dst), // Assembler maps to either Rd=+mpyi(Rs,#u8) or Rd=-mpyi(Rs,#u8) // depending on the value of m9. See Arch Spec. 
let isExtendable = 1, opExtendable = 2, isExtentSigned = 1, opExtentBits = 9, - CextOpcode = "mpyi", InputType = "imm", hasNewValue = 1 in + CextOpcode = "mpyi", InputType = "imm", hasNewValue = 1, + isAsmParserOnly = 1 in def M2_mpysmi : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, s9Ext:$src2), "$dst = mpyi($src1, #$src2)", [(set (i32 IntRegs:$dst), (mul (i32 IntRegs:$src1), - s9ExtPred:$src2))]>, ImmRegRel; + s32ImmPred:$src2))]>, ImmRegRel; let hasNewValue = 1, isExtendable = 1, opExtentBits = 8, opExtendable = 3, InputType = "imm" in @@ -2239,10 +2809,10 @@ class T_MType_acc_rr MajOp, bits<3> MinOp, let Inst{4-0} = dst; } -let CextOpcode = "MPYI_acc", Itinerary = M_tc_3x_SLOT23, isCodeGenOnly = 0 in { +let CextOpcode = "MPYI_acc", Itinerary = M_tc_3x_SLOT23 in { def M2_macsip : T_MType_acc_ri <"+= mpyi", 0b010, u8Ext, [(set (i32 IntRegs:$dst), - (add (mul IntRegs:$src2, u8ExtPred:$src3), + (add (mul IntRegs:$src2, u32ImmPred:$src3), IntRegs:$src1))]>, ImmRegRel; def M2_maci : T_MType_acc_rr <"+= mpyi", 0b000, 0b000, 0, @@ -2251,11 +2821,11 @@ let CextOpcode = "MPYI_acc", Itinerary = M_tc_3x_SLOT23, isCodeGenOnly = 0 in { IntRegs:$src1))]>, ImmRegRel; } -let CextOpcode = "ADD_acc", isCodeGenOnly = 0 in { +let CextOpcode = "ADD_acc" in { let isExtentSigned = 1 in def M2_accii : T_MType_acc_ri <"+= add", 0b100, s8Ext, [(set (i32 IntRegs:$dst), - (add (add (i32 IntRegs:$src2), s8_16ExtPred:$src3), + (add (add (i32 IntRegs:$src2), s32ImmPred:$src3), (i32 IntRegs:$src1)))]>, ImmRegRel; def M2_acci : T_MType_acc_rr <"+= add", 0b000, 0b001, 0, @@ -2264,20 +2834,18 @@ let CextOpcode = "ADD_acc", isCodeGenOnly = 0 in { (i32 IntRegs:$src1)))]>, ImmRegRel; } -let CextOpcode = "SUB_acc", isCodeGenOnly = 0 in { +let CextOpcode = "SUB_acc" in { let isExtentSigned = 1 in def M2_naccii : T_MType_acc_ri <"-= add", 0b101, s8Ext>, ImmRegRel; def M2_nacci : T_MType_acc_rr <"-= add", 0b100, 0b001, 0>, ImmRegRel; } -let Itinerary = M_tc_3x_SLOT23, isCodeGenOnly = 0 in +let Itinerary = M_tc_3x_SLOT23 in def M2_macsin : T_MType_acc_ri <"-= mpyi", 0b011, u8Ext>; -let isCodeGenOnly = 0 in { def M2_xor_xacc : T_MType_acc_rr < "^= xor", 0b100, 0b011, 0>; def M2_subacc : T_MType_acc_rr <"+= sub", 0b000, 0b011, 1>; -} class T_MType_acc_pat1 @@ -2289,10 +2857,187 @@ class T_MType_acc_pat2 (MI IntRegs:$src1, IntRegs:$src2, IntRegs:$src3)>; def : T_MType_acc_pat2 ; -def : T_MType_acc_pat1 ; +def : T_MType_acc_pat1 ; -def : T_MType_acc_pat1 ; +def : T_MType_acc_pat1 ; def : T_MType_acc_pat2 ; + +//===----------------------------------------------------------------------===// +// Template Class -- XType Vector Instructions +//===----------------------------------------------------------------------===// +class T_XTYPE_Vect < string opc, bits<3> MajOp, bits<3> MinOp, bit isConj > + : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), + "$Rdd = "#opc#"($Rss, $Rtt"#!if(isConj,"*)",")"), + [] > { + bits<5> Rdd; + bits<5> Rss; + bits<5> Rtt; + + let IClass = 0b1110; + + let Inst{27-24} = 0b1000; + let Inst{23-21} = MajOp; + let Inst{7-5} = MinOp; + let Inst{4-0} = Rdd; + let Inst{20-16} = Rss; + let Inst{12-8} = Rtt; + } + +class T_XTYPE_Vect_acc < string opc, bits<3> MajOp, bits<3> MinOp, bit isConj > + : MInst <(outs DoubleRegs:$Rdd), + (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt), + "$Rdd += "#opc#"($Rss, $Rtt"#!if(isConj,"*)",")"), + [], "$dst2 = $Rdd",M_tc_3x_SLOT23 > { + bits<5> Rdd; + bits<5> Rss; + bits<5> Rtt; + + let IClass = 0b1110; + + let Inst{27-24} = 0b1010; + let Inst{23-21} 
= MajOp;
+    let Inst{7-5} = MinOp;
+    let Inst{4-0} = Rdd;
+    let Inst{20-16} = Rss;
+    let Inst{12-8} = Rtt;
+  }
+
+class T_XTYPE_Vect_diff < bits<3> MajOp, string opc >
+  : MInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rtt, DoubleRegs:$Rss),
+  "$Rdd = "#opc#"($Rtt, $Rss)",
+  [], "",M_tc_2_SLOT23 > {
+    bits<5> Rdd;
+    bits<5> Rss;
+    bits<5> Rtt;
+
+    let IClass = 0b1110;
+
+    let Inst{27-24} = 0b1000;
+    let Inst{23-21} = MajOp;
+    let Inst{7-5} = 0b000;
+    let Inst{4-0} = Rdd;
+    let Inst{20-16} = Rss;
+    let Inst{12-8} = Rtt;
+  }
+
+// Vector reduce add unsigned bytes: Rdd32=vrmpybu(Rss32,Rtt32)
+def A2_vraddub: T_XTYPE_Vect <"vraddub", 0b010, 0b001, 0>;
+def A2_vraddub_acc: T_XTYPE_Vect_acc <"vraddub", 0b010, 0b001, 0>;
+
+// Vector sum of absolute differences unsigned bytes: Rdd=vrsadub(Rss,Rtt)
+def A2_vrsadub: T_XTYPE_Vect <"vrsadub", 0b010, 0b010, 0>;
+def A2_vrsadub_acc: T_XTYPE_Vect_acc <"vrsadub", 0b010, 0b010, 0>;
+
+// Vector absolute difference: Rdd=vabsdiffh(Rtt,Rss)
+def M2_vabsdiffh: T_XTYPE_Vect_diff<0b011, "vabsdiffh">;
+
+// Vector absolute difference words: Rdd=vabsdiffw(Rtt,Rss)
+def M2_vabsdiffw: T_XTYPE_Vect_diff<0b001, "vabsdiffw">;
+
+// Vector reduce complex multiply real or imaginary:
+// Rdd[+]=vrcmpy[ir](Rss,Rtt[*])
+def M2_vrcmpyi_s0: T_XTYPE_Vect <"vrcmpyi", 0b000, 0b000, 0>;
+def M2_vrcmpyi_s0c: T_XTYPE_Vect <"vrcmpyi", 0b010, 0b000, 1>;
+def M2_vrcmaci_s0: T_XTYPE_Vect_acc <"vrcmpyi", 0b000, 0b000, 0>;
+def M2_vrcmaci_s0c: T_XTYPE_Vect_acc <"vrcmpyi", 0b010, 0b000, 1>;
+
+def M2_vrcmpyr_s0: T_XTYPE_Vect <"vrcmpyr", 0b000, 0b001, 0>;
+def M2_vrcmpyr_s0c: T_XTYPE_Vect <"vrcmpyr", 0b011, 0b001, 1>;
+def M2_vrcmacr_s0: T_XTYPE_Vect_acc <"vrcmpyr", 0b000, 0b001, 0>;
+def M2_vrcmacr_s0c: T_XTYPE_Vect_acc <"vrcmpyr", 0b011, 0b001, 1>;
+
+// Vector reduce halfwords:
+// Rdd[+]=vrmpyh(Rss,Rtt)
+def M2_vrmpy_s0: T_XTYPE_Vect <"vrmpyh", 0b000, 0b010, 0>;
+def M2_vrmac_s0: T_XTYPE_Vect_acc <"vrmpyh", 0b000, 0b010, 0>;
+
+//===----------------------------------------------------------------------===//
+// Template Class -- Vector Multiply with accumulation.
+// Used for complex multiply real or imaginary, dual multiply and even halfwords +//===----------------------------------------------------------------------===// +let Defs = [USR_OVF] in +class T_M2_vmpy_acc_sat < string opc, bits<3> MajOp, bits<3> MinOp, + bit hasShift, bit isRnd > + : MInst <(outs DoubleRegs:$Rxx), + (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt), + "$Rxx += "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","") + #!if(isRnd,":rnd","")#":sat", + [], "$dst2 = $Rxx",M_tc_3x_SLOT23 > { + bits<5> Rxx; + bits<5> Rss; + bits<5> Rtt; + + let IClass = 0b1110; + + let Inst{27-24} = 0b1010; + let Inst{23-21} = MajOp; + let Inst{7-5} = MinOp; + let Inst{4-0} = Rxx; + let Inst{20-16} = Rss; + let Inst{12-8} = Rtt; + } + +class T_M2_vmpy_acc < string opc, bits<3> MajOp, bits<3> MinOp, + bit hasShift, bit isRnd > + : MInst <(outs DoubleRegs:$Rxx), + (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt), + "$Rxx += "#opc#"($Rss, $Rtt)"#!if(hasShift,":<<1","") + #!if(isRnd,":rnd",""), + [], "$dst2 = $Rxx",M_tc_3x_SLOT23 > { + bits<5> Rxx; + bits<5> Rss; + bits<5> Rtt; + + let IClass = 0b1110; + + let Inst{27-24} = 0b1010; + let Inst{23-21} = MajOp; + let Inst{7-5} = MinOp; + let Inst{4-0} = Rxx; + let Inst{20-16} = Rss; + let Inst{12-8} = Rtt; + } + +// Vector multiply word by signed half with accumulation +// Rxx+=vmpyw[eo]h(Rss,Rtt)[:<<1][:rnd]:sat +def M2_mmacls_s1: T_M2_vmpy_acc_sat <"vmpyweh", 0b100, 0b101, 1, 0>; +def M2_mmacls_s0: T_M2_vmpy_acc_sat <"vmpyweh", 0b000, 0b101, 0, 0>; +def M2_mmacls_rs1: T_M2_vmpy_acc_sat <"vmpyweh", 0b101, 0b101, 1, 1>; +def M2_mmacls_rs0: T_M2_vmpy_acc_sat <"vmpyweh", 0b001, 0b101, 0, 1>; + +def M2_mmachs_s1: T_M2_vmpy_acc_sat <"vmpywoh", 0b100, 0b111, 1, 0>; +def M2_mmachs_s0: T_M2_vmpy_acc_sat <"vmpywoh", 0b000, 0b111, 0, 0>; +def M2_mmachs_rs1: T_M2_vmpy_acc_sat <"vmpywoh", 0b101, 0b111, 1, 1>; +def M2_mmachs_rs0: T_M2_vmpy_acc_sat <"vmpywoh", 0b001, 0b111, 0, 1>; + +// Vector multiply word by unsigned half with accumulation +// Rxx+=vmpyw[eo]uh(Rss,Rtt)[:<<1][:rnd]:sat +def M2_mmaculs_s1: T_M2_vmpy_acc_sat <"vmpyweuh", 0b110, 0b101, 1, 0>; +def M2_mmaculs_s0: T_M2_vmpy_acc_sat <"vmpyweuh", 0b010, 0b101, 0, 0>; +def M2_mmaculs_rs1: T_M2_vmpy_acc_sat <"vmpyweuh", 0b111, 0b101, 1, 1>; +def M2_mmaculs_rs0: T_M2_vmpy_acc_sat <"vmpyweuh", 0b011, 0b101, 0, 1>; + +def M2_mmacuhs_s1: T_M2_vmpy_acc_sat <"vmpywouh", 0b110, 0b111, 1, 0>; +def M2_mmacuhs_s0: T_M2_vmpy_acc_sat <"vmpywouh", 0b010, 0b111, 0, 0>; +def M2_mmacuhs_rs1: T_M2_vmpy_acc_sat <"vmpywouh", 0b111, 0b111, 1, 1>; +def M2_mmacuhs_rs0: T_M2_vmpy_acc_sat <"vmpywouh", 0b011, 0b111, 0, 1>; + +// Vector multiply even halfwords with accumulation +// Rxx+=vmpyeh(Rss,Rtt)[:<<1][:sat] +def M2_vmac2es: T_M2_vmpy_acc <"vmpyeh", 0b001, 0b010, 0, 0>; +def M2_vmac2es_s1: T_M2_vmpy_acc_sat <"vmpyeh", 0b100, 0b110, 1, 0>; +def M2_vmac2es_s0: T_M2_vmpy_acc_sat <"vmpyeh", 0b000, 0b110, 0, 0>; + +// Vector dual multiply with accumulation +// Rxx+=vdmpy(Rss,Rtt)[:sat] +def M2_vdmacs_s1: T_M2_vmpy_acc_sat <"vdmpy", 0b100, 0b100, 1, 0>; +def M2_vdmacs_s0: T_M2_vmpy_acc_sat <"vdmpy", 0b000, 0b100, 0, 0>; + +// Vector complex multiply real or imaginary with accumulation +// Rxx+=vcmpy[ir](Rss,Rtt):sat +def M2_vcmac_s0_sat_r: T_M2_vmpy_acc_sat <"vcmpyr", 0b001, 0b100, 0, 0>; +def M2_vcmac_s0_sat_i: T_M2_vmpy_acc_sat <"vcmpyi", 0b010, 0b100, 0, 0>; + //===----------------------------------------------------------------------===// // Template Class -- Multiply signed/unsigned halfwords with and without // 
saturation and rounding @@ -2320,7 +3065,6 @@ class T_M2_mpyd < bits<2> LHbits, bit isRnd, bit hasShift, bit isUnsigned > let Inst{12-8} = Rt; } -let isCodeGenOnly = 0 in { def M2_mpyd_hh_s0: T_M2_mpyd<0b11, 0, 0, 0>; def M2_mpyd_hl_s0: T_M2_mpyd<0b10, 0, 0, 0>; def M2_mpyd_lh_s0: T_M2_mpyd<0b01, 0, 0, 0>; @@ -2351,7 +3095,7 @@ def M2_mpyud_hh_s1: T_M2_mpyd<0b11, 0, 1, 1>; def M2_mpyud_hl_s1: T_M2_mpyd<0b10, 0, 1, 1>; def M2_mpyud_lh_s1: T_M2_mpyd<0b01, 0, 1, 1>; def M2_mpyud_ll_s1: T_M2_mpyd<0b00, 0, 1, 1>; -} + //===----------------------------------------------------------------------===// // Template Class for xtype mpy: // Vector multiply @@ -2412,7 +3156,6 @@ class T_XTYPE_mpy64_acc MajOp, bits<3> MinOp, // MPY - Multiply and use full result // Rdd = mpy[u](Rs,Rt) -let isCodeGenOnly = 0 in { def M2_dpmpyss_s0 : T_XTYPE_mpy64 < "mpy", 0b000, 0b000, 0, 0, 0>; def M2_dpmpyuu_s0 : T_XTYPE_mpy64 < "mpyu", 0b010, 0b000, 0, 0, 0>; @@ -2421,7 +3164,48 @@ def M2_dpmpyss_acc_s0 : T_XTYPE_mpy64_acc < "mpy", "+", 0b000, 0b000, 0, 0, 0>; def M2_dpmpyss_nac_s0 : T_XTYPE_mpy64_acc < "mpy", "-", 0b001, 0b000, 0, 0, 0>; def M2_dpmpyuu_acc_s0 : T_XTYPE_mpy64_acc < "mpyu", "+", 0b010, 0b000, 0, 0, 0>; def M2_dpmpyuu_nac_s0 : T_XTYPE_mpy64_acc < "mpyu", "-", 0b011, 0b000, 0, 0, 0>; -} + +// Complex multiply real or imaginary +// Rxx=cmpy[ir](Rs,Rt) +def M2_cmpyi_s0 : T_XTYPE_mpy64 < "cmpyi", 0b000, 0b001, 0, 0, 0>; +def M2_cmpyr_s0 : T_XTYPE_mpy64 < "cmpyr", 0b000, 0b010, 0, 0, 0>; + +// Rxx+=cmpy[ir](Rs,Rt) +def M2_cmaci_s0 : T_XTYPE_mpy64_acc < "cmpyi", "+", 0b000, 0b001, 0, 0, 0>; +def M2_cmacr_s0 : T_XTYPE_mpy64_acc < "cmpyr", "+", 0b000, 0b010, 0, 0, 0>; + +// Complex multiply +// Rdd=cmpy(Rs,Rt)[:<<]:sat +def M2_cmpys_s0 : T_XTYPE_mpy64 < "cmpy", 0b000, 0b110, 1, 0, 0>; +def M2_cmpys_s1 : T_XTYPE_mpy64 < "cmpy", 0b100, 0b110, 1, 1, 0>; + +// Rdd=cmpy(Rs,Rt*)[:<<]:sat +def M2_cmpysc_s0 : T_XTYPE_mpy64 < "cmpy", 0b010, 0b110, 1, 0, 1>; +def M2_cmpysc_s1 : T_XTYPE_mpy64 < "cmpy", 0b110, 0b110, 1, 1, 1>; + +// Rxx[-+]=cmpy(Rs,Rt)[:<<1]:sat +def M2_cmacs_s0 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b000, 0b110, 1, 0, 0>; +def M2_cnacs_s0 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b000, 0b111, 1, 0, 0>; +def M2_cmacs_s1 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b100, 0b110, 1, 1, 0>; +def M2_cnacs_s1 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b100, 0b111, 1, 1, 0>; + +// Rxx[-+]=cmpy(Rs,Rt*)[:<<1]:sat +def M2_cmacsc_s0 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b010, 0b110, 1, 0, 1>; +def M2_cnacsc_s0 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b010, 0b111, 1, 0, 1>; +def M2_cmacsc_s1 : T_XTYPE_mpy64_acc < "cmpy", "+", 0b110, 0b110, 1, 1, 1>; +def M2_cnacsc_s1 : T_XTYPE_mpy64_acc < "cmpy", "-", 0b110, 0b111, 1, 1, 1>; + +// Vector multiply halfwords +// Rdd=vmpyh(Rs,Rt)[:<<]:sat +//let Defs = [USR_OVF] in { + def M2_vmpy2s_s1 : T_XTYPE_mpy64 < "vmpyh", 0b100, 0b101, 1, 1, 0>; + def M2_vmpy2s_s0 : T_XTYPE_mpy64 < "vmpyh", 0b000, 0b101, 1, 0, 0>; +//} + +// Rxx+=vmpyh(Rs,Rt)[:<<1][:sat] +def M2_vmac2 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b001, 0b001, 0, 0, 0>; +def M2_vmac2s_s1 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b100, 0b101, 1, 1, 0>; +def M2_vmac2s_s0 : T_XTYPE_mpy64_acc < "vmpyh", "+", 0b000, 0b101, 1, 0, 0>; def: Pat<(i64 (mul (i64 (anyext (i32 IntRegs:$src1))), (i64 (anyext (i32 IntRegs:$src2))))), @@ -2498,262 +3282,665 @@ def: Pat<(i64 (sub (i64 DoubleRegs:$src1), //===----------------------------------------------------------------------===// /// // Store doubleword. 
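One detail worth calling out before the store templates below: the 4-bit post-increment immediates (s4_0Imm through s4_3Imm) are stored scaled by the access size, which is what the offset{6-3}/offset{5-2}/offset{4-1}/offset{3-0} slices in T_store_pi and the circular-load classes express. A minimal sketch of that scaling follows; the helper names are hypothetical.

#include <cassert>
#include <cstdint>

// accessShift: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword.
uint8_t encodeS4Offset(int32_t byteOff, unsigned accessShift) {
  assert(byteOff % (1 << accessShift) == 0 && "offset must be naturally aligned");
  int32_t scaled = byteOff / (1 << accessShift);   // drop the known-zero low bits
  assert(scaled >= -8 && scaled <= 7 && "scaled offset must fit in s4");
  return uint8_t(scaled & 0xF);                    // 4-bit two's-complement field
}

int32_t decodeS4Offset(uint8_t field, unsigned accessShift) {
  int32_t scaled = field & 0xF;
  if (scaled >= 8)                                 // sign-extend the s4 field
    scaled -= 16;
  return scaled * (1 << accessShift);              // restore the byte offset
}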
- //===----------------------------------------------------------------------===// -// Post increment store +// Template class for non-predicated post increment stores with immediate offset //===----------------------------------------------------------------------===// +let isPredicable = 1, hasSideEffects = 0, addrMode = PostInc in +class T_store_pi MajOp, bit isHalf > + : STInst <(outs IntRegs:$_dst_), + (ins IntRegs:$src1, ImmOp:$offset, RC:$src2), + mnemonic#"($src1++#$offset) = $src2"#!if(isHalf, ".h", ""), + [], "$src1 = $_dst_" >, + AddrModeRel { + bits<5> src1; + bits<5> src2; + bits<7> offset; + bits<4> offsetBits; + + string ImmOpStr = !cast(ImmOp); + let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3}, + !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2}, + !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1}, + /* s4_0Imm */ offset{3-0}))); + // Store upper-half and store doubleword cannot be NV. + let isNVStorable = !if (!eq(ImmOpStr, "s4_3Imm"), 0, !if(isHalf,0,1)); + + let IClass = 0b1010; + + let Inst{27-25} = 0b101; + let Inst{24-21} = MajOp; + let Inst{20-16} = src1; + let Inst{13} = 0b0; + let Inst{12-8} = src2; + let Inst{7} = 0b0; + let Inst{6-3} = offsetBits; + let Inst{1} = 0b0; + } -multiclass ST_PostInc_Pbase { - let isPredicatedNew = isPredNew in - def NAME : STInst2PI<(outs IntRegs:$dst), +//===----------------------------------------------------------------------===// +// Template class for predicated post increment stores with immediate offset +//===----------------------------------------------------------------------===// +let isPredicated = 1, hasSideEffects = 0, addrMode = PostInc in +class T_pstore_pi MajOp, bit isHalf, bit isPredNot, bit isPredNew> + : STInst <(outs IntRegs:$_dst_), (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$offset, RC:$src3), - !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($src2++#$offset) = $src3", - [], - "$src2 = $dst">; -} + !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", + ") ")#mnemonic#"($src2++#$offset) = $src3"#!if(isHalf, ".h", ""), + [], "$src2 = $_dst_" >, + AddrModeRel { + bits<2> src1; + bits<5> src2; + bits<7> offset; + bits<5> src3; + bits<4> offsetBits; -multiclass ST_PostInc_Pred { - let isPredicatedFalse = PredNot in { - defm _c#NAME : ST_PostInc_Pbase; - // Predicate new - let Predicates = [HasV4T], validSubTargets = HasV4SubT in - defm _cdn#NAME#_V4 : ST_PostInc_Pbase; + string ImmOpStr = !cast(ImmOp); + let offsetBits = !if (!eq(ImmOpStr, "s4_3Imm"), offset{6-3}, + !if (!eq(ImmOpStr, "s4_2Imm"), offset{5-2}, + !if (!eq(ImmOpStr, "s4_1Imm"), offset{4-1}, + /* s4_0Imm */ offset{3-0}))); + + // Store upper-half and store doubleword cannot be NV. 
+ let isNVStorable = !if (!eq(ImmOpStr, "s4_3Imm"), 0, !if(isHalf,0,1)); + let isPredicatedNew = isPredNew; + let isPredicatedFalse = isPredNot; + + let IClass = 0b1010; + + let Inst{27-25} = 0b101; + let Inst{24-21} = MajOp; + let Inst{20-16} = src2; + let Inst{13} = 0b1; + let Inst{12-8} = src3; + let Inst{7} = isPredNew; + let Inst{6-3} = offsetBits; + let Inst{2} = isPredNot; + let Inst{1-0} = src1; } -} -let hasCtrlDep = 1, isNVStorable = 1, hasSideEffects = 0 in multiclass ST_PostInc { + Operand ImmOp, bits<4> MajOp, bit isHalf = 0 > { - let hasCtrlDep = 1, BaseOpcode = "POST_"#BaseOp in { - let isPredicable = 1 in - def NAME : STInst2PI<(outs IntRegs:$dst), - (ins IntRegs:$src1, ImmOp:$offset, RC:$src2), - mnemonic#"($src1++#$offset) = $src2", - [], - "$src1 = $dst">; - - let isPredicated = 1 in { - defm Pt : ST_PostInc_Pred; - defm NotPt : ST_PostInc_Pred; - } + let BaseOpcode = "POST_"#BaseOp in { + def S2_#NAME#_pi : T_store_pi ; + + // Predicated + def S2_p#NAME#t_pi : T_pstore_pi ; + def S2_p#NAME#f_pi : T_pstore_pi ; + + // Predicated new + def S2_p#NAME#tnew_pi : T_pstore_pi ; + def S2_p#NAME#fnew_pi : T_pstore_pi ; } } -defm POST_STbri: ST_PostInc <"memb", "STrib", IntRegs, s4_0Imm>, AddrModeRel; -defm POST_SThri: ST_PostInc <"memh", "STrih", IntRegs, s4_1Imm>, AddrModeRel; -defm POST_STwri: ST_PostInc <"memw", "STriw", IntRegs, s4_2Imm>, AddrModeRel; +let accessSize = ByteAccess in +defm storerb: ST_PostInc <"memb", "STrib", IntRegs, s4_0Imm, 0b1000>; -let isNVStorable = 0 in -defm POST_STdri: ST_PostInc <"memd", "STrid", DoubleRegs, s4_3Imm>, AddrModeRel; +let accessSize = HalfWordAccess in +defm storerh: ST_PostInc <"memh", "STrih", IntRegs, s4_1Imm, 0b1010>; -def : Pat<(post_truncsti8 (i32 IntRegs:$src1), IntRegs:$src2, - s4_3ImmPred:$offset), - (POST_STbri IntRegs:$src2, s4_0ImmPred:$offset, IntRegs:$src1)>; +let accessSize = WordAccess in +defm storeri: ST_PostInc <"memw", "STriw", IntRegs, s4_2Imm, 0b1100>; -def : Pat<(post_truncsti16 (i32 IntRegs:$src1), IntRegs:$src2, - s4_3ImmPred:$offset), - (POST_SThri IntRegs:$src2, s4_1ImmPred:$offset, IntRegs:$src1)>; +let accessSize = DoubleWordAccess in +defm storerd: ST_PostInc <"memd", "STrid", DoubleRegs, s4_3Imm, 0b1110>; -def : Pat<(post_store (i32 IntRegs:$src1), IntRegs:$src2, s4_2ImmPred:$offset), - (POST_STwri IntRegs:$src2, s4_1ImmPred:$offset, IntRegs:$src1)>; +let accessSize = HalfWordAccess, isNVStorable = 0 in +defm storerf: ST_PostInc <"memh", "STrih_H", IntRegs, s4_1Imm, 0b1011, 1>; -def : Pat<(post_store (i64 DoubleRegs:$src1), IntRegs:$src2, - s4_3ImmPred:$offset), - (POST_STdri IntRegs:$src2, s4_3ImmPred:$offset, DoubleRegs:$src1)>; +class Storepi_pat + : Pat<(Store Value:$src1, I32:$src2, Offset:$offset), + (MI I32:$src2, imm:$offset, Value:$src1)>; + +def: Storepi_pat; +def: Storepi_pat; +def: Storepi_pat; +def: Storepi_pat; //===----------------------------------------------------------------------===// -// multiclass for the store instructions with MEMri operand. +// Template class for post increment stores with register offset. 
//===----------------------------------------------------------------------===// -multiclass ST_MEMri_Pbase { - let isPredicatedNew = isPredNew in - def NAME : STInst2<(outs), - (ins PredRegs:$src1, MEMri:$addr, RC: $src2), - !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($addr) = $src2", - []>; -} +class T_store_pr MajOp, + MemAccessSize AccessSz, bit isHalf = 0> + : STInst <(outs IntRegs:$_dst_), + (ins IntRegs:$src1, ModRegs:$src2, RC:$src3), + mnemonic#"($src1++$src2) = $src3"#!if(isHalf, ".h", ""), + [], "$src1 = $_dst_" > { + bits<5> src1; + bits<1> src2; + bits<5> src3; + let accessSize = AccessSz; -multiclass ST_MEMri_Pred { - let isPredicatedFalse = PredNot in { - defm _c#NAME : ST_MEMri_Pbase; + // Store upper-half and store doubleword cannot be NV. + let isNVStorable = !if(!eq(mnemonic,"memd"), 0, !if(isHalf,0,1)); - // Predicate new - let validSubTargets = HasV4SubT, Predicates = [HasV4T] in - defm _cdn#NAME#_V4 : ST_MEMri_Pbase; + let IClass = 0b1010; + + let Inst{27-24} = 0b1101; + let Inst{23-21} = MajOp; + let Inst{20-16} = src1; + let Inst{13} = src2; + let Inst{12-8} = src3; + let Inst{7} = 0b0; } -} -let isExtendable = 1, isNVStorable = 1, hasSideEffects = 0 in -multiclass ST_MEMri ImmBits, bits<5> PredImmBits> { +def S2_storerb_pr : T_store_pr<"memb", IntRegs, 0b000, ByteAccess>; +def S2_storerh_pr : T_store_pr<"memh", IntRegs, 0b010, HalfWordAccess>; +def S2_storeri_pr : T_store_pr<"memw", IntRegs, 0b100, WordAccess>; +def S2_storerd_pr : T_store_pr<"memd", DoubleRegs, 0b110, DoubleWordAccess>; +def S2_storerf_pr : T_store_pr<"memh", IntRegs, 0b011, HalfWordAccess, 1>; - let CextOpcode = CextOp, BaseOpcode = CextOp in { - let opExtendable = 1, isExtentSigned = 1, opExtentBits = ImmBits, - isPredicable = 1 in - def NAME : STInst2<(outs), - (ins MEMri:$addr, RC:$src), - mnemonic#"($addr) = $src", - []>; - - let opExtendable = 2, isExtentSigned = 0, opExtentBits = PredImmBits, - isPredicated = 1 in { - defm Pt : ST_MEMri_Pred; - defm NotPt : ST_MEMri_Pred; - } - } -} +let opExtendable = 1, isExtentSigned = 1, isPredicable = 1 in +class T_store_io MajOp, bit isH = 0> + : STInst <(outs), + (ins IntRegs:$src1, ImmOp:$src2, RC:$src3), + mnemonic#"($src1+#$src2) = $src3"#!if(isH,".h","")>, + AddrModeRel, ImmRegRel { + bits<5> src1; + bits<14> src2; // Actual address offset + bits<5> src3; + bits<11> offsetBits; // Represents offset encoding -let addrMode = BaseImmOffset, isMEMri = "true" in { - let accessSize = ByteAccess in - defm STrib: ST_MEMri < "memb", "STrib", IntRegs, 11, 6>, AddrModeRel; + string ImmOpStr = !cast(ImmOp); - let accessSize = HalfWordAccess in - defm STrih: ST_MEMri < "memh", "STrih", IntRegs, 12, 7>, AddrModeRel; + let opExtentBits = !if (!eq(ImmOpStr, "s11_3Ext"), 14, + !if (!eq(ImmOpStr, "s11_2Ext"), 13, + !if (!eq(ImmOpStr, "s11_1Ext"), 12, + /* s11_0Ext */ 11))); + let offsetBits = !if (!eq(ImmOpStr, "s11_3Ext"), src2{13-3}, + !if (!eq(ImmOpStr, "s11_2Ext"), src2{12-2}, + !if (!eq(ImmOpStr, "s11_1Ext"), src2{11-1}, + /* s11_0Ext */ src2{10-0}))); + // Store upper-half and store doubleword cannot be NV. 
+ let isNVStorable = !if (!eq(mnemonic, "memd"), 0, !if(isH,0,1)); + let IClass = 0b1010; + + let Inst{27} = 0b0; + let Inst{26-25} = offsetBits{10-9}; + let Inst{24} = 0b1; + let Inst{23-21} = MajOp; + let Inst{20-16} = src1; + let Inst{13} = offsetBits{8}; + let Inst{12-8} = src3; + let Inst{7-0} = offsetBits{7-0}; + } - let accessSize = WordAccess in - defm STriw: ST_MEMri < "memw", "STriw", IntRegs, 13, 8>, AddrModeRel; +let opExtendable = 2, isPredicated = 1 in +class T_pstore_io MajOp, bit PredNot, bit isPredNew, bit isH = 0> + : STInst <(outs), + (ins PredRegs:$src1, IntRegs:$src2, ImmOp:$src3, RC:$src4), + !if(PredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", + ") ")#mnemonic#"($src2+#$src3) = $src4"#!if(isH,".h",""), + [],"",V2LDST_tc_st_SLOT01 >, + AddrModeRel, ImmRegRel { + bits<2> src1; + bits<5> src2; + bits<9> src3; // Actual address offset + bits<5> src4; + bits<6> offsetBits; // Represents offset encoding - let accessSize = DoubleWordAccess, isNVStorable = 0 in - defm STrid: ST_MEMri < "memd", "STrid", DoubleRegs, 14, 9>, AddrModeRel; -} + let isPredicatedNew = isPredNew; + let isPredicatedFalse = PredNot; -def : Pat<(truncstorei8 (i32 IntRegs:$src1), ADDRriS11_0:$addr), - (STrib ADDRriS11_0:$addr, (i32 IntRegs:$src1))>; + string ImmOpStr = !cast(ImmOp); + let opExtentBits = !if (!eq(ImmOpStr, "u6_3Ext"), 9, + !if (!eq(ImmOpStr, "u6_2Ext"), 8, + !if (!eq(ImmOpStr, "u6_1Ext"), 7, + /* u6_0Ext */ 6))); + let offsetBits = !if (!eq(ImmOpStr, "u6_3Ext"), src3{8-3}, + !if (!eq(ImmOpStr, "u6_2Ext"), src3{7-2}, + !if (!eq(ImmOpStr, "u6_1Ext"), src3{6-1}, + /* u6_0Ext */ src3{5-0}))); + // Store upper-half and store doubleword cannot be NV. + let isNVStorable = !if (!eq(mnemonic, "memd"), 0, !if(isH,0,1)); -def : Pat<(truncstorei16 (i32 IntRegs:$src1), ADDRriS11_1:$addr), - (STrih ADDRriS11_1:$addr, (i32 IntRegs:$src1))>; + let IClass = 0b0100; -def : Pat<(store (i32 IntRegs:$src1), ADDRriS11_2:$addr), - (STriw ADDRriS11_2:$addr, (i32 IntRegs:$src1))>; + let Inst{27} = 0b0; + let Inst{26} = PredNot; + let Inst{25} = isPredNew; + let Inst{24} = 0b0; + let Inst{23-21} = MajOp; + let Inst{20-16} = src2; + let Inst{13} = offsetBits{5}; + let Inst{12-8} = src4; + let Inst{7-3} = offsetBits{4-0}; + let Inst{1-0} = src1; + } -def : Pat<(store (i64 DoubleRegs:$src1), ADDRriS11_3:$addr), - (STrid ADDRriS11_3:$addr, (i64 DoubleRegs:$src1))>; +let isExtendable = 1, hasSideEffects = 0 in +multiclass ST_Idxd MajOp, bit isH = 0> { + let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in { + def S2_#NAME#_io : T_store_io ; + // Predicated + def S2_p#NAME#t_io : T_pstore_io; + def S2_p#NAME#f_io : T_pstore_io; -//===----------------------------------------------------------------------===// -// multiclass for the store instructions with base+immediate offset -// addressing mode -//===----------------------------------------------------------------------===// -multiclass ST_Idxd_Pbase { - let isPredicatedNew = isPredNew in - def NAME : STInst2<(outs), - (ins PredRegs:$src1, IntRegs:$src2, predImmOp:$src3, RC: $src4), - !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", - ") ")#mnemonic#"($src2+#$src3) = $src4", - []>; + // Predicated new + def S4_p#NAME#tnew_io : T_pstore_io ; + def S4_p#NAME#fnew_io : T_pstore_io ; + } } -multiclass ST_Idxd_Pred { - let isPredicatedFalse = PredNot, isPredicated = 1 in { - defm _c#NAME : ST_Idxd_Pbase; +let addrMode = BaseImmOffset, InputType = "imm" in { + let accessSize = ByteAccess in + defm storerb: ST_Idxd < "memb", "STrib", IntRegs, 
s11_0Ext, u6_0Ext, 0b000>; + + let accessSize = HalfWordAccess, opExtentAlign = 1 in + defm storerh: ST_Idxd < "memh", "STrih", IntRegs, s11_1Ext, u6_1Ext, 0b010>; + + let accessSize = WordAccess, opExtentAlign = 2 in + defm storeri: ST_Idxd < "memw", "STriw", IntRegs, s11_2Ext, u6_2Ext, 0b100>; + + let accessSize = DoubleWordAccess, isNVStorable = 0, opExtentAlign = 3 in + defm storerd: ST_Idxd < "memd", "STrid", DoubleRegs, s11_3Ext, + u6_3Ext, 0b110>; + + let accessSize = HalfWordAccess, opExtentAlign = 1 in + defm storerf: ST_Idxd < "memh", "STrif", IntRegs, s11_1Ext, + u6_1Ext, 0b011, 1>; +} + +// Patterns for generating stores, where the address takes different forms: +// - frameindex, +// - frameindex + offset, +// - base + offset, +// - simple (base address without offset). +// These would usually be used together (via Storex_pat defined below), but +// in some cases one may want to apply different properties (such as +// AddedComplexity) to the individual patterns. +class Storex_fi_pat + : Pat<(Store Value:$Rs, AddrFI:$fi), (MI AddrFI:$fi, 0, Value:$Rs)>; +class Storex_fi_add_pat + : Pat<(Store Value:$Rs, (add (i32 AddrFI:$fi), ImmPred:$Off)), + (MI AddrFI:$fi, imm:$Off, Value:$Rs)>; +class Storex_add_pat + : Pat<(Store Value:$Rt, (add (i32 IntRegs:$Rs), ImmPred:$Off)), + (MI IntRegs:$Rs, imm:$Off, Value:$Rt)>; +class Storex_simple_pat + : Pat<(Store Value:$Rt, (i32 IntRegs:$Rs)), + (MI IntRegs:$Rs, 0, Value:$Rt)>; + +// Patterns for generating stores, where the address takes different forms, +// and where the value being stored is transformed through the value modifier +// ValueMod. The address forms are same as above. +class Storexm_fi_pat + : Pat<(Store Value:$Rs, AddrFI:$fi), + (MI AddrFI:$fi, 0, (ValueMod Value:$Rs))>; +class Storexm_fi_add_pat + : Pat<(Store Value:$Rs, (add (i32 AddrFI:$fi), ImmPred:$Off)), + (MI AddrFI:$fi, imm:$Off, (ValueMod Value:$Rs))>; +class Storexm_add_pat + : Pat<(Store Value:$Rt, (add (i32 IntRegs:$Rs), ImmPred:$Off)), + (MI IntRegs:$Rs, imm:$Off, (ValueMod Value:$Rt))>; +class Storexm_simple_pat + : Pat<(Store Value:$Rt, (i32 IntRegs:$Rs)), + (MI IntRegs:$Rs, 0, (ValueMod Value:$Rt))>; + +multiclass Storex_pat { + def: Storex_fi_pat ; + def: Storex_fi_add_pat ; + def: Storex_add_pat ; +} + +multiclass Storexm_pat { + def: Storexm_fi_pat ; + def: Storexm_fi_add_pat ; + def: Storexm_add_pat ; +} + +// Regular stores in the DAG have two operands: value and address. +// Atomic stores also have two, but they are reversed: address, value. +// To use atomic stores with the patterns, they need to have their operands +// swapped. This relies on the knowledge that the F.Fragment uses names +// "ptr" and "val". +class SwapSt + : PatFrag<(ops node:$val, node:$ptr), F.Fragment>; - // Predicate new - let validSubTargets = HasV4SubT, Predicates = [HasV4T] in - defm _cdn#NAME#_V4 : ST_Idxd_Pbase; - } +let AddedComplexity = 20 in { + defm: Storex_pat; + defm: Storex_pat; + defm: Storex_pat; + defm: Storex_pat; + + defm: Storex_pat, I32, s32_0ImmPred, S2_storerb_io>; + defm: Storex_pat, I32, s31_1ImmPred, S2_storerh_io>; + defm: Storex_pat, I32, s30_2ImmPred, S2_storeri_io>; + defm: Storex_pat, I64, s29_3ImmPred, S2_storerd_io>; } -let isExtendable = 1, isNVStorable = 1, hasSideEffects = 0 in -multiclass ST_Idxd ImmBits, - bits<5> PredImmBits> { +// Simple patterns should be tried with the least priority. 
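For orientation, the three address shapes these patterns target map onto very ordinary source code: the frame-index forms cover stores to stack objects, the add forms cover base-plus-immediate accesses, and the simple forms cover a plain pointer dereference. The AddedComplexity = 20 above is what makes the offset-carrying patterns win over the simple ones, matching the comment that simple patterns are tried last. The example below is only illustrative; whether each store survives to instruction selection in exactly this shape depends on the optimizer.

#include <cstdint>

void storeShapes(int32_t *base, int32_t v) {
  volatile int32_t local = v;   // stack object: frame-index store (Storex_fi_pat shape)
  base[4] = v;                  // base + immediate offset (Storex_add_pat shape)
  *base = v;                    // plain dereference (Storex_simple_pat shape)
  (void)local;
}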
+def: Storex_simple_pat; +def: Storex_simple_pat; +def: Storex_simple_pat; +def: Storex_simple_pat; - let CextOpcode = CextOp, BaseOpcode = CextOp#_indexed in { - let opExtendable = 1, isExtentSigned = 1, opExtentBits = ImmBits, - isPredicable = 1 in - def NAME : STInst2<(outs), - (ins IntRegs:$src1, ImmOp:$src2, RC:$src3), - mnemonic#"($src1+#$src2) = $src3", - []>; +def: Storex_simple_pat, I32, S2_storerb_io>; +def: Storex_simple_pat, I32, S2_storerh_io>; +def: Storex_simple_pat, I32, S2_storeri_io>; +def: Storex_simple_pat, I64, S2_storerd_io>; - let opExtendable = 2, isExtentSigned = 0, opExtentBits = PredImmBits in { - defm Pt : ST_Idxd_Pred; - defm NotPt : ST_Idxd_Pred; - } - } +let AddedComplexity = 20 in { + defm: Storexm_pat; + defm: Storexm_pat; + defm: Storexm_pat; } -let addrMode = BaseImmOffset, InputType = "reg" in { - let accessSize = ByteAccess in - defm STrib_indexed: ST_Idxd < "memb", "STrib", IntRegs, s11_0Ext, - u6_0Ext, 11, 6>, AddrModeRel, ImmRegRel; +def: Storexm_simple_pat; +def: Storexm_simple_pat; +def: Storexm_simple_pat; - let accessSize = HalfWordAccess in - defm STrih_indexed: ST_Idxd < "memh", "STrih", IntRegs, s11_1Ext, - u6_1Ext, 12, 7>, AddrModeRel, ImmRegRel; +// Store predicate. +let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13, + isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in +def STriw_pred : STInst<(outs), + (ins IntRegs:$addr, s11_2Ext:$off, PredRegs:$src1), + ".error \"should not emit\"", []>; + +// S2_allocframe: Allocate stack frame. +let Defs = [R29, R30], Uses = [R29, R31, R30], + hasSideEffects = 0, accessSize = DoubleWordAccess in +def S2_allocframe: ST0Inst < + (outs), (ins u11_3Imm:$u11_3), + "allocframe(#$u11_3)" > { + bits<14> u11_3; + + let IClass = 0b1010; + let Inst{27-16} = 0b000010011101; + let Inst{13-11} = 0b000; + let Inst{10-0} = u11_3{13-3}; + } - let accessSize = WordAccess in - defm STriw_indexed: ST_Idxd < "memw", "STriw", IntRegs, s11_2Ext, - u6_2Ext, 13, 8>, AddrModeRel, ImmRegRel; +// S2_storer[bhwdf]_pci: Store byte/half/word/double. 
+// S2_storer[bhwdf]_pci -> S2_storerbnew_pci +let Uses = [CS] in +class T_store_pci MajOp, + MemAccessSize AlignSize, string RegSrc = "Rt"> + : STInst <(outs IntRegs:$_dst_), + (ins IntRegs:$Rz, Imm:$offset, ModRegs:$Mu, RC:$Rt), + #mnemonic#"($Rz ++ #$offset:circ($Mu)) = $"#RegSrc#"", + [] , + "$Rz = $_dst_" > { + bits<5> Rz; + bits<7> offset; + bits<1> Mu; + bits<5> Rt; + let accessSize = AlignSize; + let isNVStorable = !if(!eq(mnemonic,"memd"), 0, + !if(!eq(RegSrc,"Rt.h"), 0, 1)); - let accessSize = DoubleWordAccess, isNVStorable = 0 in - defm STrid_indexed: ST_Idxd < "memd", "STrid", DoubleRegs, s11_3Ext, - u6_3Ext, 14, 9>, AddrModeRel; -} + let IClass = 0b1010; + let Inst{27-25} = 0b100; + let Inst{24-21} = MajOp; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12-8} = Rt; + let Inst{7} = 0b0; + let Inst{6-3} = + !if (!eq(!cast(AlignSize), "DoubleWordAccess"), offset{6-3}, + !if (!eq(!cast(AlignSize), "WordAccess"), offset{5-2}, + !if (!eq(!cast(AlignSize), "HalfWordAccess"), offset{4-1}, + /* ByteAccess */ offset{3-0}))); + let Inst{1} = 0b0; + } -let AddedComplexity = 10 in { -def : Pat<(truncstorei8 (i32 IntRegs:$src1), (add IntRegs:$src2, - s11_0ExtPred:$offset)), - (STrib_indexed IntRegs:$src2, s11_0ImmPred:$offset, - (i32 IntRegs:$src1))>; +def S2_storerb_pci : T_store_pci<"memb", IntRegs, s4_0Imm, 0b1000, + ByteAccess>; +def S2_storerh_pci : T_store_pci<"memh", IntRegs, s4_1Imm, 0b1010, + HalfWordAccess>; +def S2_storerf_pci : T_store_pci<"memh", IntRegs, s4_1Imm, 0b1011, + HalfWordAccess, "Rt.h">; +def S2_storeri_pci : T_store_pci<"memw", IntRegs, s4_2Imm, 0b1100, + WordAccess>; +def S2_storerd_pci : T_store_pci<"memd", DoubleRegs, s4_3Imm, 0b1110, + DoubleWordAccess>; + +let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 4 in +class T_storenew_pci MajOp, MemAccessSize AlignSize> + : NVInst < (outs IntRegs:$_dst_), + (ins IntRegs:$Rz, Imm:$offset, ModRegs:$Mu, IntRegs:$Nt), + #mnemonic#"($Rz ++ #$offset:circ($Mu)) = $Nt.new", + [], + "$Rz = $_dst_"> { + bits<5> Rz; + bits<6> offset; + bits<1> Mu; + bits<3> Nt; + + let accessSize = AlignSize; + + let IClass = 0b1010; + let Inst{27-21} = 0b1001101; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12-11} = MajOp; + let Inst{10-8} = Nt; + let Inst{7} = 0b0; + let Inst{6-3} = + !if (!eq(!cast(AlignSize), "WordAccess"), offset{5-2}, + !if (!eq(!cast(AlignSize), "HalfWordAccess"), offset{4-1}, + /* ByteAccess */ offset{3-0})); + let Inst{1} = 0b0; + } -def : Pat<(truncstorei16 (i32 IntRegs:$src1), (add IntRegs:$src2, - s11_1ExtPred:$offset)), - (STrih_indexed IntRegs:$src2, s11_1ImmPred:$offset, - (i32 IntRegs:$src1))>; +def S2_storerbnew_pci : T_storenew_pci <"memb", s4_0Imm, 0b00, ByteAccess>; +def S2_storerhnew_pci : T_storenew_pci <"memh", s4_1Imm, 0b01, HalfWordAccess>; +def S2_storerinew_pci : T_storenew_pci <"memw", s4_2Imm, 0b10, WordAccess>; -def : Pat<(store (i32 IntRegs:$src1), (add IntRegs:$src2, - s11_2ExtPred:$offset)), - (STriw_indexed IntRegs:$src2, s11_2ImmPred:$offset, - (i32 IntRegs:$src1))>; +//===----------------------------------------------------------------------===// +// Circular stores - Pseudo +// +// Please note that the input operand order in the pseudo instructions +// doesn't match with the real instructions. Pseudo instructions operand +// order should mimics the ordering in the intrinsics. 
+//===----------------------------------------------------------------------===// +let isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0, isPseudo = 1 in +class T_store_pci_pseudo + : STInstPI<(outs IntRegs:$_dst_), + (ins IntRegs:$src1, RC:$src2, IntRegs:$src3, s4Imm:$src4), + ".error \""#opc#"($src1++#$src4:circ($src3)) = $src2\"", + [], "$_dst_ = $src1">; + +def S2_storerb_pci_pseudo : T_store_pci_pseudo <"memb", IntRegs>; +def S2_storerh_pci_pseudo : T_store_pci_pseudo <"memh", IntRegs>; +def S2_storerf_pci_pseudo : T_store_pci_pseudo <"memh", IntRegs>; +def S2_storeri_pci_pseudo : T_store_pci_pseudo <"memw", IntRegs>; +def S2_storerd_pci_pseudo : T_store_pci_pseudo <"memd", DoubleRegs>; + +//===----------------------------------------------------------------------===// +// Circular stores with auto-increment register +//===----------------------------------------------------------------------===// +let Uses = [CS] in +class T_store_pcr MajOp, + MemAccessSize AlignSize, string RegSrc = "Rt"> + : STInst <(outs IntRegs:$_dst_), + (ins IntRegs:$Rz, ModRegs:$Mu, RC:$Rt), + #mnemonic#"($Rz ++ I:circ($Mu)) = $"#RegSrc#"", + [], + "$Rz = $_dst_" > { + bits<5> Rz; + bits<1> Mu; + bits<5> Rt; -def : Pat<(store (i64 DoubleRegs:$src1), (add IntRegs:$src2, - s11_3ExtPred:$offset)), - (STrid_indexed IntRegs:$src2, s11_3ImmPred:$offset, - (i64 DoubleRegs:$src1))>; -} + let accessSize = AlignSize; + let isNVStorable = !if(!eq(mnemonic,"memd"), 0, + !if(!eq(RegSrc,"Rt.h"), 0, 1)); + + let IClass = 0b1010; + let Inst{27-25} = 0b100; + let Inst{24-21} = MajOp; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12-8} = Rt; + let Inst{7} = 0b0; + let Inst{1} = 0b1; + } -// memh(Rx++#s4:1)=Rt.H +def S2_storerb_pcr : T_store_pcr<"memb", IntRegs, 0b1000, ByteAccess>; +def S2_storerh_pcr : T_store_pcr<"memh", IntRegs, 0b1010, HalfWordAccess>; +def S2_storeri_pcr : T_store_pcr<"memw", IntRegs, 0b1100, WordAccess>; +def S2_storerd_pcr : T_store_pcr<"memd", DoubleRegs, 0b1110, DoubleWordAccess>; +def S2_storerf_pcr : T_store_pcr<"memh", IntRegs, 0b1011, + HalfWordAccess, "Rt.h">; + +//===----------------------------------------------------------------------===// +// Circular .new stores with auto-increment register +//===----------------------------------------------------------------------===// +let Uses = [CS], isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 3 in +class T_storenew_pcr MajOp, + MemAccessSize AlignSize> + : NVInst <(outs IntRegs:$_dst_), + (ins IntRegs:$Rz, ModRegs:$Mu, IntRegs:$Nt), + #mnemonic#"($Rz ++ I:circ($Mu)) = $Nt.new" , + [] , + "$Rz = $_dst_"> { + bits<5> Rz; + bits<1> Mu; + bits<3> Nt; + + let accessSize = AlignSize; + + let IClass = 0b1010; + let Inst{27-21} = 0b1001101; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12-11} = MajOp; + let Inst{10-8} = Nt; + let Inst{7} = 0b0; + let Inst{1} = 0b1; + } + +def S2_storerbnew_pcr : T_storenew_pcr <"memb", 0b00, ByteAccess>; +def S2_storerhnew_pcr : T_storenew_pcr <"memh", 0b01, HalfWordAccess>; +def S2_storerinew_pcr : T_storenew_pcr <"memw", 0b10, WordAccess>; -// Store word. -// Store predicate. -let Defs = [R10,R11,D5], hasSideEffects = 0 in -def STriw_pred : STInst2<(outs), - (ins MEMri:$addr, PredRegs:$src1), - "Error; should not emit", - []>; - -// Allocate stack frame. 
-let Defs = [R29, R30], Uses = [R31, R30], hasSideEffects = 0 in { - def ALLOCFRAME : STInst2<(outs), - (ins i32imm:$amt), - "allocframe(#$amt)", - []>; -} //===----------------------------------------------------------------------===// -// ST - +// Bit-reversed stores with auto-increment register //===----------------------------------------------------------------------===// +let hasSideEffects = 0 in +class T_store_pbr majOp, + bit isHalf = 0> + : STInst + <(outs IntRegs:$_dst_), + (ins IntRegs:$Rz, ModRegs:$Mu, RC:$src), + #mnemonic#"($Rz ++ $Mu:brev) = $src"#!if (!eq(isHalf, 1), ".h", ""), + [], "$Rz = $_dst_" > { + + let accessSize = addrSize; + + bits<5> Rz; + bits<1> Mu; + bits<5> src; + + let IClass = 0b1010; + + let Inst{27-24} = 0b1111; + let Inst{23-21} = majOp; + let Inst{7} = 0b0; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{12-8} = src; + } + +let isNVStorable = 1 in { + let BaseOpcode = "S2_storerb_pbr" in + def S2_storerb_pbr : T_store_pbr<"memb", IntRegs, ByteAccess, + 0b000>, NewValueRel; + let BaseOpcode = "S2_storerh_pbr" in + def S2_storerh_pbr : T_store_pbr<"memh", IntRegs, HalfWordAccess, + 0b010>, NewValueRel; + let BaseOpcode = "S2_storeri_pbr" in + def S2_storeri_pbr : T_store_pbr<"memw", IntRegs, WordAccess, + 0b100>, NewValueRel; +} + +def S2_storerf_pbr : T_store_pbr<"memh", IntRegs, HalfWordAccess, 0b011, 1>; +def S2_storerd_pbr : T_store_pbr<"memd", DoubleRegs, DoubleWordAccess, 0b110>; + +//===----------------------------------------------------------------------===// +// Bit-reversed .new stores with auto-increment register +//===----------------------------------------------------------------------===// +let isNewValue = 1, mayStore = 1, isNVStore = 1, opNewValue = 3, + hasSideEffects = 0 in +class T_storenew_pbr majOp> + : NVInst <(outs IntRegs:$_dst_), + (ins IntRegs:$Rz, ModRegs:$Mu, IntRegs:$Nt), + #mnemonic#"($Rz ++ $Mu:brev) = $Nt.new", [], + "$Rz = $_dst_">, NewValueRel { + let accessSize = addrSize; + bits<5> Rz; + bits<1> Mu; + bits<3> Nt; + + let IClass = 0b1010; + + let Inst{27-21} = 0b1111101; + let Inst{12-11} = majOp; + let Inst{7} = 0b0; + let Inst{20-16} = Rz; + let Inst{13} = Mu; + let Inst{10-8} = Nt; + } + +let BaseOpcode = "S2_storerb_pbr" in +def S2_storerbnew_pbr : T_storenew_pbr<"memb", ByteAccess, 0b00>; + +let BaseOpcode = "S2_storerh_pbr" in +def S2_storerhnew_pbr : T_storenew_pbr<"memh", HalfWordAccess, 0b01>; + +let BaseOpcode = "S2_storeri_pbr" in +def S2_storerinew_pbr : T_storenew_pbr<"memw", WordAccess, 0b10>; //===----------------------------------------------------------------------===// -// STYPE/ALU + +// Bit-reversed stores - Pseudo +// +// Please note that the input operand order in the pseudo instructions +// doesn't match with the real instructions. Pseudo instructions operand +// order should mimics the ordering in the intrinsics. //===----------------------------------------------------------------------===// -// Logical NOT. 
-def NOT_rr64 : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1), - "$dst = not($src1)", - [(set (i64 DoubleRegs:$dst), (not (i64 DoubleRegs:$src1)))]>; +let isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0, isPseudo = 1 in +class T_store_pbr_pseudo + : STInstPI<(outs IntRegs:$_dst_), + (ins IntRegs:$src1, RC:$src2, IntRegs:$src3), + ".error \""#opc#"($src1++$src3:brev) = $src2\"", + [], "$_dst_ = $src1">; +def S2_storerb_pbr_pseudo : T_store_pbr_pseudo <"memb", IntRegs>; +def S2_storerh_pbr_pseudo : T_store_pbr_pseudo <"memh", IntRegs>; +def S2_storeri_pbr_pseudo : T_store_pbr_pseudo <"memw", IntRegs>; +def S2_storerf_pbr_pseudo : T_store_pbr_pseudo <"memh", IntRegs>; +def S2_storerd_pbr_pseudo : T_store_pbr_pseudo <"memd", DoubleRegs>; //===----------------------------------------------------------------------===// -// STYPE/ALU - +// ST - //===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// Template class for S_2op instructions. +//===----------------------------------------------------------------------===// let hasSideEffects = 0 in class T_S2op_1 RegTyBits, RegisterClass RCOut, RegisterClass RCIn, bits<2> MajOp, bits<3> MinOp, bit isSat> @@ -2784,26 +3971,62 @@ let hasNewValue = 1 in class T_S2op_1_ii MajOp, bits<3> MinOp, bit isSat = 0> : T_S2op_1 ; +// Vector sign/zero extend +let isReMaterializable = 1, isAsCheapAsAMove = 1 in { + def S2_vsxtbh : T_S2op_1_di <"vsxtbh", 0b00, 0b000>; + def S2_vsxthw : T_S2op_1_di <"vsxthw", 0b00, 0b100>; + def S2_vzxtbh : T_S2op_1_di <"vzxtbh", 0b00, 0b010>; + def S2_vzxthw : T_S2op_1_di <"vzxthw", 0b00, 0b110>; +} + +// Vector splat bytes/halfwords +let isReMaterializable = 1, isAsCheapAsAMove = 1 in { + def S2_vsplatrb : T_S2op_1_ii <"vsplatb", 0b01, 0b111>; + def S2_vsplatrh : T_S2op_1_di <"vsplath", 0b01, 0b010>; +} + // Sign extend word to doubleword -let isCodeGenOnly = 0 in def A2_sxtw : T_S2op_1_di <"sxtw", 0b01, 0b000>; def: Pat <(i64 (sext I32:$src)), (A2_sxtw I32:$src)>; +// Vector saturate and pack +let Defs = [USR_OVF] in { + def S2_svsathb : T_S2op_1_ii <"vsathb", 0b10, 0b000>; + def S2_svsathub : T_S2op_1_ii <"vsathub", 0b10, 0b010>; + def S2_vsathb : T_S2op_1_id <"vsathb", 0b00, 0b110>; + def S2_vsathub : T_S2op_1_id <"vsathub", 0b00, 0b000>; + def S2_vsatwh : T_S2op_1_id <"vsatwh", 0b00, 0b010>; + def S2_vsatwuh : T_S2op_1_id <"vsatwuh", 0b00, 0b100>; +} + +// Vector truncate +def S2_vtrunohb : T_S2op_1_id <"vtrunohb", 0b10, 0b000>; +def S2_vtrunehb : T_S2op_1_id <"vtrunehb", 0b10, 0b010>; + // Swizzle the bytes of a word -let isCodeGenOnly = 0 in def A2_swiz : T_S2op_1_ii <"swiz", 0b10, 0b111>; // Saturate -let Defs = [USR_OVF], isCodeGenOnly = 0 in { +let Defs = [USR_OVF] in { def A2_sat : T_S2op_1_id <"sat", 0b11, 0b000>; def A2_satb : T_S2op_1_ii <"satb", 0b11, 0b111>; def A2_satub : T_S2op_1_ii <"satub", 0b11, 0b110>; def A2_sath : T_S2op_1_ii <"sath", 0b11, 0b100>; def A2_satuh : T_S2op_1_ii <"satuh", 0b11, 0b101>; + def A2_roundsat : T_S2op_1_id <"round", 0b11, 0b001, 0b1>; } -let Itinerary = S_2op_tc_2_SLOT23, isCodeGenOnly = 0 in { +let Itinerary = S_2op_tc_2_SLOT23 in { + // Vector round and pack + def S2_vrndpackwh : T_S2op_1_id <"vrndwh", 0b10, 0b100>; + + let Defs = [USR_OVF] in + def S2_vrndpackwhs : T_S2op_1_id <"vrndwh", 0b10, 0b110, 1>; + + // Bit reverse + def S2_brev : T_S2op_1_ii <"brev", 0b01, 0b110>; + // Absolute value word def A2_abs : T_S2op_1_ii <"abs", 0b10, 0b100>; @@ -2848,7 
+4071,14 @@ class T_S2op_2 RegTyBits, RegisterClass RCOut, let Inst{7-5} = MinOp; let Inst{4-0} = dst; } - + +class T_S2op_2_di MajOp, bits<3> MinOp> + : T_S2op_2 ; + +let hasNewValue = 1 in +class T_S2op_2_id MajOp, bits<3> MinOp> + : T_S2op_2 ; + let hasNewValue = 1 in class T_S2op_2_ii MajOp, bits<3> MinOp, bit isSat = 0, bit isRnd = 0, list pattern = []> @@ -2860,21 +4090,33 @@ class T_S2op_shift MajOp, bits<3> MinOp, SDNode OpNd> [(set (i32 IntRegs:$dst), (OpNd (i32 IntRegs:$src), (u5ImmPred:$u5)))]>; +// Vector arithmetic shift right by immediate with truncate and pack +def S2_asr_i_svw_trun : T_S2op_2_id <"vasrw", 0b110, 0b010>; + // Arithmetic/logical shift right/left by immediate -let Itinerary = S_2op_tc_1_SLOT23, isCodeGenOnly = 0 in { +let Itinerary = S_2op_tc_1_SLOT23 in { def S2_asr_i_r : T_S2op_shift <"asr", 0b000, 0b000, sra>; def S2_lsr_i_r : T_S2op_shift <"lsr", 0b000, 0b001, srl>; def S2_asl_i_r : T_S2op_shift <"asl", 0b000, 0b010, shl>; } // Shift left by immediate with saturation -let Defs = [USR_OVF], isCodeGenOnly = 0 in +let Defs = [USR_OVF] in def S2_asl_i_r_sat : T_S2op_2_ii <"asl", 0b010, 0b010, 1>; // Shift right with round -let isCodeGenOnly = 0 in def S2_asr_i_r_rnd : T_S2op_2_ii <"asr", 0b010, 0b000, 0, 1>; +let isAsmParserOnly = 1 in +def S2_asr_i_r_rnd_goodsyntax + : SInst <(outs IntRegs:$dst), (ins IntRegs:$src, u5Imm:$u5), + "$dst = asrrnd($src, #$u5)", + [], "", S_2op_tc_1_SLOT23>; + +let isAsmParserOnly = 1 in +def A2_not: ALU32_rr<(outs IntRegs:$dst),(ins IntRegs:$src), + "$dst = not($src)">; + def: Pat<(i32 (sra (i32 (add (i32 (sra I32:$src1, u5ImmPred:$src2)), (i32 1))), (i32 1))), @@ -2893,17 +4135,34 @@ class T_S2op_3MajOp, bits<3>minOp, bits<1> sat = 0> let Inst{4-0} = Rdd; } -let isCodeGenOnly = 0 in { def A2_absp : T_S2op_3 <"abs", 0b10, 0b110>; def A2_negp : T_S2op_3 <"neg", 0b10, 0b101>; def A2_notp : T_S2op_3 <"not", 0b10, 0b100>; -} // Innterleave/deinterleave -let isCodeGenOnly = 0 in { def S2_interleave : T_S2op_3 <"interleave", 0b11, 0b101>; def S2_deinterleave : T_S2op_3 <"deinterleave", 0b11, 0b100>; -} + +// Vector Complex conjugate +def A2_vconj : T_S2op_3 <"vconj", 0b10, 0b111, 1>; + +// Vector saturate without pack +def S2_vsathb_nopack : T_S2op_3 <"vsathb", 0b00, 0b111>; +def S2_vsathub_nopack : T_S2op_3 <"vsathub", 0b00, 0b100>; +def S2_vsatwh_nopack : T_S2op_3 <"vsatwh", 0b00, 0b110>; +def S2_vsatwuh_nopack : T_S2op_3 <"vsatwuh", 0b00, 0b101>; + +// Vector absolute value halfwords with and without saturation +// Rdd64=vabsh(Rss64)[:sat] +def A2_vabsh : T_S2op_3 <"vabsh", 0b01, 0b100>; +def A2_vabshsat : T_S2op_3 <"vabsh", 0b01, 0b101, 1>; + +// Vector absolute value words with and without saturation +def A2_vabsw : T_S2op_3 <"vabsw", 0b01, 0b110>; +def A2_vabswsat : T_S2op_3 <"vabsw", 0b01, 0b111, 1>; + +def : Pat<(not (i64 DoubleRegs:$src1)), + (A2_notp DoubleRegs:$src1)>; //===----------------------------------------------------------------------===// // STYPE/BIT + @@ -2934,7 +4193,6 @@ class T_COUNT_LEADING_64 MajOp, bits<3> MinOp> : T_COUNT_LEADING; -let isCodeGenOnly = 0 in { def S2_cl0 : T_COUNT_LEADING_32<"cl0", 0b000, 0b101>; def S2_cl1 : T_COUNT_LEADING_32<"cl1", 0b000, 0b110>; def S2_ct0 : T_COUNT_LEADING_32<"ct0", 0b010, 0b100>; @@ -2944,14 +4202,28 @@ def S2_cl1p : T_COUNT_LEADING_64<"cl1", 0b010, 0b100>; def S2_clb : T_COUNT_LEADING_32<"clb", 0b000, 0b100>; def S2_clbp : T_COUNT_LEADING_64<"clb", 0b010, 0b000>; def S2_clbnorm : T_COUNT_LEADING_32<"normamt", 0b000, 0b111>; -} -def: Pat<(i32 (ctlz I32:$Rs)), (S2_cl0 
I32:$Rs)>; -def: Pat<(i32 (ctlz (not I32:$Rs))), (S2_cl1 I32:$Rs)>; -def: Pat<(i32 (cttz I32:$Rs)), (S2_ct0 I32:$Rs)>; -def: Pat<(i32 (cttz (not I32:$Rs))), (S2_ct1 I32:$Rs)>; -def: Pat<(i32 (trunc (ctlz I64:$Rss))), (S2_cl0p I64:$Rss)>; +// Count leading zeros. +def: Pat<(i32 (ctlz I32:$Rs)), (S2_cl0 I32:$Rs)>; +def: Pat<(i32 (trunc (ctlz I64:$Rss))), (S2_cl0p I64:$Rss)>; +def: Pat<(i32 (ctlz_zero_undef I32:$Rs)), (S2_cl0 I32:$Rs)>; +def: Pat<(i32 (trunc (ctlz_zero_undef I64:$Rss))), (S2_cl0p I64:$Rss)>; + +// Count trailing zeros: 32-bit. +def: Pat<(i32 (cttz I32:$Rs)), (S2_ct0 I32:$Rs)>; +def: Pat<(i32 (cttz_zero_undef I32:$Rs)), (S2_ct0 I32:$Rs)>; + +// Count leading ones. +def: Pat<(i32 (ctlz (not I32:$Rs))), (S2_cl1 I32:$Rs)>; def: Pat<(i32 (trunc (ctlz (not I64:$Rss)))), (S2_cl1p I64:$Rss)>; +def: Pat<(i32 (ctlz_zero_undef (not I32:$Rs))), (S2_cl1 I32:$Rs)>; +def: Pat<(i32 (trunc (ctlz_zero_undef (not I64:$Rss)))), (S2_cl1p I64:$Rss)>; + +// Count trailing ones: 32-bit. +def: Pat<(i32 (cttz (not I32:$Rs))), (S2_ct1 I32:$Rs)>; +def: Pat<(i32 (cttz_zero_undef (not I32:$Rs))), (S2_ct1 I32:$Rs)>; + +// The 64-bit counts leading/trailing are defined in HexagonInstrInfoV4.td. // Bit set/clear/toggle @@ -2986,14 +4258,12 @@ class T_SCT_BIT_REG MinOp> let Inst{4-0} = Rd; } -let isCodeGenOnly = 0 in { def S2_clrbit_i : T_SCT_BIT_IMM<"clrbit", 0b001>; def S2_setbit_i : T_SCT_BIT_IMM<"setbit", 0b000>; def S2_togglebit_i : T_SCT_BIT_IMM<"togglebit", 0b010>; def S2_clrbit_r : T_SCT_BIT_REG<"clrbit", 0b01>; def S2_setbit_r : T_SCT_BIT_REG<"setbit", 0b00>; def S2_togglebit_r : T_SCT_BIT_REG<"togglebit", 0b10>; -} def: Pat<(i32 (and (i32 IntRegs:$Rs), (not (shl 1, u5ImmPred:$u5)))), (S2_clrbit_i IntRegs:$Rs, u5ImmPred:$u5)>; @@ -3043,10 +4313,8 @@ class T_TEST_BIT_REG let Inst{1-0} = Pd; } -let isCodeGenOnly = 0 in { def S2_tstbit_i : T_TEST_BIT_IMM<"tstbit", 0b000>; def S2_tstbit_r : T_TEST_BIT_REG<"tstbit", 0>; -} let AddedComplexity = 20 in { // Complexity greater than cmp reg-imm. def: Pat<(i1 (setne (and (shl 1, u5ImmPred:$u5), (i32 IntRegs:$Rs)), 0)), @@ -3058,6 +4326,7 @@ let AddedComplexity = 20 in { // Complexity greater than cmp reg-imm. def: Pat<(i1 (trunc (i64 DoubleRegs:$Rs))), (S2_tstbit_i (LoReg DoubleRegs:$Rs), 0)>; } + let hasSideEffects = 0 in class T_TEST_BITS_IMM MajOp, bit IsNeg> : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, u6Imm:$u6), @@ -3092,11 +4361,9 @@ class T_TEST_BITS_REG MajOp, bit IsNeg> let Inst{1-0} = Pd; } -let isCodeGenOnly = 0 in { def C2_bitsclri : T_TEST_BITS_IMM<"bitsclr", 0b10, 0>; def C2_bitsclr : T_TEST_BITS_REG<"bitsclr", 0b10, 0>; def C2_bitsset : T_TEST_BITS_REG<"bitsset", 0b01, 0>; -} let AddedComplexity = 20 in { // Complexity greater than compare reg-imm. 
def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), u6ImmPred:$u6), 0)), @@ -3124,6 +4391,14 @@ def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)), IntRegs:$Rt)), // XTYPE/PERM + //===----------------------------------------------------------------------===// +def: Pat<(or (or (shl (or (shl (i32 (extloadi8 (add (i32 IntRegs:$b), 3))), + (i32 8)), + (i32 (zextloadi8 (add (i32 IntRegs:$b), 2)))), + (i32 16)), + (shl (i32 (zextloadi8 (add (i32 IntRegs:$b), 1))), (i32 8))), + (zextloadi8 (i32 IntRegs:$b))), + (A2_swiz (L2_loadri_io IntRegs:$b, 0))>; + //===----------------------------------------------------------------------===// // XTYPE/PERM - //===----------------------------------------------------------------------===// @@ -3133,7 +4408,7 @@ def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)), IntRegs:$Rt)), //===----------------------------------------------------------------------===// // Predicate transfer. -let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in +let hasSideEffects = 0, hasNewValue = 1 in def C2_tfrpr : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps), "$Rd = $Ps", [], "", S_2op_tc_1_SLOT23> { bits<5> Rd; @@ -3147,7 +4422,7 @@ def C2_tfrpr : SInst<(outs IntRegs:$Rd), (ins PredRegs:$Ps), } // Transfer general register to predicate. -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def C2_tfrrp: SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs), "$Pd = $Rs", [], "", S_2op_tc_2early_SLOT23> { bits<2> Pd; @@ -3159,6 +4434,27 @@ def C2_tfrrp: SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs), let Inst{1-0} = Pd; } +let hasSideEffects = 0, isCodeGenOnly = 1 in +def C2_pxfer_map: SInst<(outs PredRegs:$dst), (ins PredRegs:$src), + "$dst = $src">; + + +// Patterns for loads of i1: +def: Pat<(i1 (load AddrFI:$fi)), + (C2_tfrrp (L2_loadrub_io AddrFI:$fi, 0))>; +def: Pat<(i1 (load (add (i32 IntRegs:$Rs), s32ImmPred:$Off))), + (C2_tfrrp (L2_loadrub_io IntRegs:$Rs, imm:$Off))>; +def: Pat<(i1 (load (i32 IntRegs:$Rs))), + (C2_tfrrp (L2_loadrub_io IntRegs:$Rs, 0))>; + +def I1toI32: OutPatFrag<(ops node:$Rs), + (C2_muxii (i1 $Rs), 1, 0)>; + +def I32toI1: OutPatFrag<(ops node:$Rs), + (i1 (C2_tfrrp (i32 $Rs)))>; + +defm: Storexm_pat; +def: Storexm_simple_pat; //===----------------------------------------------------------------------===// // STYPE/PRED - @@ -3191,15 +4487,12 @@ class S_2OpInstImmI6MinOp> } // Shift by immediate. -let isCodeGenOnly = 0 in { def S2_asr_i_p : S_2OpInstImmI6<"asr", sra, 0b000>; def S2_asl_i_p : S_2OpInstImmI6<"asl", shl, 0b010>; def S2_lsr_i_p : S_2OpInstImmI6<"lsr", srl, 0b001>; -} // Shift left by small amount and add. 
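The XTYPE/PERM pattern above recognizes a 32-bit value pieced together from four adjacent byte loads and selects a single word load followed by A2_swiz, which reverses the four bytes of a register. A C sketch of what the selected pair computes (illustrative only; it presumes the base pointer is word-aligned so the combined load is valid):

#include <stdint.h>
#include <string.h>

/* What A2_swiz leaves in the destination: the bytes of Rs reversed. */
static uint32_t swiz32(uint32_t x) {
  return (x << 24) | ((x & 0x0000ff00u) << 8) |
         ((x >> 8) & 0x0000ff00u) | (x >> 24);
}

/* One word load plus a swizzle in place of four byte loads and shifts. */
static uint32_t load_swizzled(const void *b) {
  uint32_t w;
  memcpy(&w, b, sizeof w);   /* L2_loadri_io $b, 0 (word-aligned assumed) */
  return swiz32(w);          /* A2_swiz */
}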
-let AddedComplexity = 100, hasNewValue = 1, hasSideEffects = 0, - isCodeGenOnly = 0 in +let AddedComplexity = 100, hasNewValue = 1, hasSideEffects = 0 in def S2_addasl_rrri: SInst <(outs IntRegs:$Rd), (ins IntRegs:$Rt, IntRegs:$Rs, u3Imm:$u3), "$Rd = addasl($Rt, $Rs, #$u3)" , @@ -3248,8 +4541,8 @@ def S2_addasl_rrri: SInst <(outs IntRegs:$Rd), //===----------------------------------------------------------------------===// def HexagonBARRIER: SDNode<"HexagonISD::BARRIER", SDTNone, [SDNPHasChain]>; -let hasSideEffects = 1, isSoloAX = 1, isCodeGenOnly = 0 in -def BARRIER : SYSInst<(outs), (ins), +let hasSideEffects = 1, isSoloAX = 1 in +def Y2_barrier : SYSInst<(outs), (ins), "barrier", [(HexagonBARRIER)],"",ST_tc_st_SLOT0> { let Inst{31-28} = 0b1010; @@ -3259,6 +4552,20 @@ def BARRIER : SYSInst<(outs), (ins), //===----------------------------------------------------------------------===// // SYSTEM/SUPER - //===----------------------------------------------------------------------===// + +// Generate frameindex addresses. The main reason for the offset operand is +// that every instruction that is allowed to have frame index as an operand +// will then have that operand followed by an immediate operand (the offset). +// This simplifies the frame-index elimination code. +// +let isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1, + isPseudo = 1, isCodeGenOnly = 1, hasSideEffects = 0 in { + def TFR_FI : ALU32_ri<(outs IntRegs:$Rd), + (ins IntRegs:$fi, s32Imm:$off), "">; + def TFR_FIA : ALU32_ri<(outs IntRegs:$Rd), + (ins IntRegs:$Rs, IntRegs:$fi, s32Imm:$off), "">; +} + //===----------------------------------------------------------------------===// // CRUSER - Type. //===----------------------------------------------------------------------===// @@ -3304,14 +4611,19 @@ class LOOP_rBase multiclass LOOP_ri { def i : LOOP_iBase; def r : LOOP_rBase; + + let isCodeGenOnly = 1, isExtended = 1, opExtendable = 0 in { + def iext: LOOP_iBase; + def rext: LOOP_rBase; + } } -let Defs = [SA0, LC0, USR], isCodeGenOnly = 0 in +let Defs = [SA0, LC0, USR] in defm J2_loop0 : LOOP_ri<"loop0">; // Interestingly only loop0's appear to set usr.lpcfg -let Defs = [SA1, LC1], isCodeGenOnly = 0 in +let Defs = [SA1, LC1] in defm J2_loop1 : LOOP_ri<"loop1">; let isBranch = 1, isTerminator = 1, hasSideEffects = 0, @@ -3372,12 +4684,40 @@ multiclass SPLOOP_ri op> { def r : SPLOOP_rBase; } -let isCodeGenOnly = 0 in { defm J2_ploop1s : SPLOOP_ri<"1", 0b01>; defm J2_ploop2s : SPLOOP_ri<"2", 0b10>; defm J2_ploop3s : SPLOOP_ri<"3", 0b11>; + +// if (Rs[!>=<]=#0) jump:[t/nt] +let Defs = [PC], isPredicated = 1, isBranch = 1, hasSideEffects = 0, + hasSideEffects = 0 in +class J2_jump_0_Base op> + : CRInst <(outs), (ins IntRegs:$Rs, brtarget:$r13_2), + "if ($Rs"#compare#"#0) jump"#!if(isTak, ":t", ":nt")#" $r13_2" > { + bits<5> Rs; + bits<15> r13_2; + + let IClass = 0b0110; + + let Inst{27-24} = 0b0001; + let Inst{23-22} = op; + let Inst{12} = isTak; + let Inst{21} = r13_2{14}; + let Inst{20-16} = Rs; + let Inst{11-1} = r13_2{12-2}; + let Inst{13} = r13_2{13}; + } + +multiclass J2_jump_compare_0 op> { + def NAME : J2_jump_0_Base; + def NAME#pt : J2_jump_0_Base; } +defm J2_jumprz : J2_jump_compare_0<"!=", 0b00>; +defm J2_jumprgtez : J2_jump_compare_0<">=", 0b01>; +defm J2_jumprnz : J2_jump_compare_0<"==", 0b10>; +defm J2_jumprltez : J2_jump_compare_0<"<=", 0b11>; + // Transfer to/from Control/GPR Guest/GPR let hasSideEffects = 0 in class TFR_CR_RS_base @@ -3394,8 +4734,9 @@ class TFR_CR_RS_base let Inst{20-16} = src; let 
Inst{4-0} = dst; } -let isCodeGenOnly = 0 in + def A2_tfrrcr : TFR_CR_RS_base; +def A4_tfrpcp : TFR_CR_RS_base; def : InstAlias<"m0 = $Rs", (A2_tfrrcr C6, IntRegs:$Rs)>; def : InstAlias<"m1 = $Rs", (A2_tfrrcr C7, IntRegs:$Rs)>; @@ -3415,13 +4756,14 @@ class TFR_RD_CR_base let Inst{4-0} = dst; } -let hasNewValue = 1, opNewValue = 0, isCodeGenOnly = 0 in +let hasNewValue = 1, opNewValue = 0 in def A2_tfrcrr : TFR_RD_CR_base; +def A4_tfrcpp : TFR_RD_CR_base; def : InstAlias<"$Rd = m0", (A2_tfrcrr IntRegs:$Rd, C6)>; def : InstAlias<"$Rd = m1", (A2_tfrcrr IntRegs:$Rd, C7)>; // Y4_trace: Send value to etm trace. -let isSoloAX = 1, hasSideEffects = 0, isCodeGenOnly = 0 in +let isSoloAX = 1, hasSideEffects = 0 in def Y4_trace: CRInst <(outs), (ins IntRegs:$Rs), "trace($Rs)"> { bits<5> Rs; @@ -3431,350 +4773,201 @@ def Y4_trace: CRInst <(outs), (ins IntRegs:$Rs), let Inst{20-16} = Rs; } -let AddedComplexity = 100, isPredicated = 1 in -def TFR_condset_ri : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s12Imm:$src3), - "Error; should not emit", - [(set (i32 IntRegs:$dst), - (i32 (select (i1 PredRegs:$src1), (i32 IntRegs:$src2), - s12ImmPred:$src3)))]>; - -let AddedComplexity = 100, isPredicated = 1 in -def TFR_condset_ir : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, s12Imm:$src2, IntRegs:$src3), - "Error; should not emit", - [(set (i32 IntRegs:$dst), - (i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2, - (i32 IntRegs:$src3))))]>; - -let AddedComplexity = 100, isPredicated = 1 in -def TFR_condset_ii : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, s12Imm:$src2, s12Imm:$src3), - "Error; should not emit", - [(set (i32 IntRegs:$dst), - (i32 (select (i1 PredRegs:$src1), s12ImmPred:$src2, - s12ImmPred:$src3)))]>; - -// Generate frameindex addresses. -let isReMaterializable = 1 in -def TFR_FI : ALU32_ri<(outs IntRegs:$dst), (ins FrameIndex:$src1), - "$dst = add($src1)", - [(set (i32 IntRegs:$dst), ADDRri:$src1)]>; - // Support for generating global address. // Taken from X86InstrInfo.td. 
-def SDTHexagonCONST32 : SDTypeProfile<1, 1, [ - SDTCisVT<0, i32>, - SDTCisVT<1, i32>, - SDTCisPtrTy<0>]>; -def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>; -def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>; +def SDTHexagonCONST32 : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, + SDTCisVT<1, i32>, + SDTCisPtrTy<0>]>; +def HexagonCONST32 : SDNode<"HexagonISD::CONST32", SDTHexagonCONST32>; +def HexagonCONST32_GP : SDNode<"HexagonISD::CONST32_GP", SDTHexagonCONST32>; // HI/LO Instructions -let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0 in -def LO : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst.l = #LO($global)", - []>; - -let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0 in -def HI : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst.h = #HI($global)", - []>; +let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0, + hasNewValue = 1, opNewValue = 0 in +class REG_IMMED MajOp, bit MinOp> + : ALU32_ri<(outs IntRegs:$dst), + (ins i32imm:$imm_value), + "$dst"#RegHalf#" = #"#Op#"($imm_value)", []> { + bits<5> dst; + bits<32> imm_value; + let IClass = 0b0111; -let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0 in -def LOi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value), - "$dst.l = #LO($imm_value)", - []>; + let Inst{27} = Rs; + let Inst{26-24} = MajOp; + let Inst{21} = MinOp; + let Inst{20-16} = dst; + let Inst{23-22} = !if (!eq(Op, "LO"), imm_value{15-14}, imm_value{31-30}); + let Inst{13-0} = !if (!eq(Op, "LO"), imm_value{13-0}, imm_value{29-16}); +} +let isAsmParserOnly = 1 in { + def LO : REG_IMMED<".l", "LO", 0b0, 0b001, 0b1>; + def LO_H : REG_IMMED<".l", "HI", 0b0, 0b001, 0b1>; + def HI : REG_IMMED<".h", "HI", 0b0, 0b010, 0b1>; + def HI_L : REG_IMMED<".h", "LO", 0b0, 0b010, 0b1>; +} -let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0 in -def HIi : ALU32_ri<(outs IntRegs:$dst), (ins i32imm:$imm_value), - "$dst.h = #HI($imm_value)", - []>; +let isMoveImm = 1, isCodeGenOnly = 1 in +def LO_PIC : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label), + "$dst.l = #LO($label@GOTREL)", + []>; -let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0 in -def LO_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt), - "$dst.l = #LO($jt)", - []>; +let isMoveImm = 1, isCodeGenOnly = 1 in +def HI_PIC : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label), + "$dst.h = #HI($label@GOTREL)", + []>; -let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0 in -def HI_jt : ALU32_ri<(outs IntRegs:$dst), (ins jumptablebase:$jt), - "$dst.h = #HI($jt)", - []>; +let isReMaterializable = 1, isMoveImm = 1, + isCodeGenOnly = 1, hasSideEffects = 0 in +def HI_GOT : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst.h = #HI($global@GOT)", + []>; +let isReMaterializable = 1, isMoveImm = 1, + isCodeGenOnly = 1, hasSideEffects = 0 in +def LO_GOT : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst.l = #LO($global@GOT)", + []>; -let isReMaterializable = 1, isMoveImm = 1, hasSideEffects = 0 in -def LO_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label), - "$dst.l = #LO($label)", - []>; +let isReMaterializable = 1, isMoveImm = 1, + isCodeGenOnly = 1, hasSideEffects = 0 in +def HI_GOTREL : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst.h = #HI($global@GOTREL)", + []>; -let isReMaterializable = 1, isMoveImm = 1 , hasSideEffects = 0 in -def HI_label : ALU32_ri<(outs IntRegs:$dst), (ins bblabel:$label), - "$dst.h = 
#HI($label)", - []>; +let isReMaterializable = 1, isMoveImm = 1, + isCodeGenOnly = 1, hasSideEffects = 0 in +def LO_GOTREL : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$global), + "$dst.l = #LO($global@GOTREL)", + []>; // This pattern is incorrect. When we add small data, we should change // this pattern to use memw(#foo). // This is for sdata. -let isMoveImm = 1 in -def CONST32 : LDInst<(outs IntRegs:$dst), (ins globaladdress:$global), +let isMoveImm = 1, isAsmParserOnly = 1 in +def CONST32 : CONSTLDInst<(outs IntRegs:$dst), (ins globaladdress:$global), "$dst = CONST32(#$global)", [(set (i32 IntRegs:$dst), (load (HexagonCONST32 tglobaltlsaddr:$global)))]>; -// This is for non-sdata. -let isReMaterializable = 1, isMoveImm = 1 in -def CONST32_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst = CONST32(#$global)", - [(set (i32 IntRegs:$dst), - (HexagonCONST32 tglobaladdr:$global))]>; - -let isReMaterializable = 1, isMoveImm = 1 in -def CONST32_set_jt : LDInst2<(outs IntRegs:$dst), (ins jumptablebase:$jt), - "$dst = CONST32(#$jt)", - [(set (i32 IntRegs:$dst), - (HexagonCONST32 tjumptable:$jt))]>; - -let isReMaterializable = 1, isMoveImm = 1 in -def CONST32GP_set : LDInst2<(outs IntRegs:$dst), (ins globaladdress:$global), - "$dst = CONST32(#$global)", - [(set (i32 IntRegs:$dst), - (HexagonCONST32_GP tglobaladdr:$global))]>; - -let isReMaterializable = 1, isMoveImm = 1 in -def CONST32_Int_Real : LDInst2<(outs IntRegs:$dst), (ins i32imm:$global), +let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in +def CONST32_Int_Real : CONSTLDInst<(outs IntRegs:$dst), (ins i32imm:$global), "$dst = CONST32(#$global)", [(set (i32 IntRegs:$dst), imm:$global) ]>; -// Map BlockAddress lowering to CONST32_Int_Real -def : Pat<(HexagonCONST32_GP tblockaddress:$addr), - (CONST32_Int_Real tblockaddress:$addr)>; +// Map TLS addressses to a CONST32 instruction +def: Pat<(HexagonCONST32 tglobaltlsaddr:$addr), (A2_tfrsi s16Ext:$addr)>; +def: Pat<(HexagonCONST32 bbl:$label), (A2_tfrsi s16Ext:$label)>; -let isReMaterializable = 1, isMoveImm = 1 in -def CONST32_Label : LDInst2<(outs IntRegs:$dst), (ins bblabel:$label), - "$dst = CONST32($label)", - [(set (i32 IntRegs:$dst), (HexagonCONST32 bbl:$label))]>; - -let isReMaterializable = 1, isMoveImm = 1 in -def CONST64_Int_Real : LDInst2<(outs DoubleRegs:$dst), (ins i64imm:$global), +let isReMaterializable = 1, isMoveImm = 1, isAsmParserOnly = 1 in +def CONST64_Int_Real : CONSTLDInst<(outs DoubleRegs:$dst), (ins i64imm:$global), "$dst = CONST64(#$global)", - [(set (i64 DoubleRegs:$dst), imm:$global) ]>; + [(set (i64 DoubleRegs:$dst), imm:$global)]>; -def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins), - "$dst = xor($dst, $dst)", - [(set (i1 PredRegs:$dst), 0)]>; +let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1, + isCodeGenOnly = 1 in +def TFR_PdTrue : SInst<(outs PredRegs:$dst), (ins), "", + [(set (i1 PredRegs:$dst), 1)]>; -def MPY_trsext : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), - "$dst = mpy($src1, $src2)", - [(set (i32 IntRegs:$dst), - (trunc (i64 (srl (i64 (mul (i64 (sext (i32 IntRegs:$src1))), - (i64 (sext (i32 IntRegs:$src2))))), - (i32 32)))))]>; +let hasSideEffects = 0, isReMaterializable = 1, isPseudo = 1, + isCodeGenOnly = 1 in +def TFR_PdFalse : SInst<(outs PredRegs:$dst), (ins), "$dst = xor($dst, $dst)", + [(set (i1 PredRegs:$dst), 0)]>; // Pseudo instructions. 
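The LO/HI transfers above each write one 16-bit half of a register from an immediate, so a pair of them materializes an arbitrary 32-bit constant. A sketch of the resulting register value (illustrative; the halves are split exactly as the encodings above do):

#include <stdint.h>

/* Rd.h = #HI(imm) followed by Rd.l = #LO(imm) leaves Rd == imm. */
static uint32_t materialize_const32(uint32_t imm) {
  uint32_t rd = 0;
  rd = (rd & 0x0000ffffu) | (imm & 0xffff0000u);   /* Rd.h = #HI(imm) */
  rd = (rd & 0xffff0000u) | (imm & 0x0000ffffu);   /* Rd.l = #LO(imm) */
  return rd;
}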
def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>; - -def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, +def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, SDTCisVT<1, i32> ]>; -def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; - def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart, [SDNPHasChain, SDNPOutGlue]>; +def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; -def SDT_SPCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; - -def call : SDNode<"HexagonISD::CALL", SDT_SPCall, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, SDNPVariadic]>; +def SDT_SPCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; // For tailcalls a HexagonTCRet SDNode has 3 SDNode Properties - a chain, // Optional Flag and Variable Arguments. // Its 1 Operand has pointer type. -def HexagonTCRet : SDNode<"HexagonISD::TC_RETURN", SDT_SPCall, - [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +def HexagonTCRet : SDNode<"HexagonISD::TC_RETURN", SDT_SPCall, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; -let Defs = [R29, R30], Uses = [R31, R30, R29] in { - def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt), - "Should never be emitted", - [(callseq_start timm:$amt)]>; -} +let Defs = [R29, R30], Uses = [R31, R30, R29], isPseudo = 1 in +def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt), + ".error \"should not emit\" ", + [(callseq_start timm:$amt)]>; -let Defs = [R29, R30, R31], Uses = [R29] in { - def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2), - "Should never be emitted", - [(callseq_end timm:$amt1, timm:$amt2)]>; -} -// Call subroutine. -let isCall = 1, hasSideEffects = 0, - Defs = [D0, D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, - R22, R23, R28, R31, P0, P1, P2, P3, LC0, LC1, SA0, SA1] in { - def CALL : JInst<(outs), (ins calltarget:$dst), - "call $dst", []>; -} +let Defs = [R29, R30, R31], Uses = [R29], isPseudo = 1 in +def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2), + ".error \"should not emit\" ", + [(callseq_end timm:$amt1, timm:$amt2)]>; // Call subroutine indirectly. -let Defs = VolatileV3.Regs, isCodeGenOnly = 0 in +let Defs = VolatileV3.Regs in def J2_callr : JUMPR_MISC_CALLR<0, 1>; // Indirect tail-call. -let isCodeGenOnly = 1, isCall = 1, isReturn = 1 in -def TCRETURNR : T_JMPr; +let isPseudo = 1, isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0, + isTerminator = 1, isCodeGenOnly = 1 in +def TCRETURNr : T_JMPr; // Direct tail-calls. -let isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0, -isTerminator = 1, isCodeGenOnly = 1 in { - def TCRETURNtg : JInst<(outs), (ins calltarget:$dst), "jump $dst", - [], "", J_tc_2early_SLOT23>; - def TCRETURNtext : JInst<(outs), (ins calltarget:$dst), "jump $dst", - [], "", J_tc_2early_SLOT23>; -} - -// Map call instruction. -def : Pat<(call (i32 IntRegs:$dst)), - (J2_callr (i32 IntRegs:$dst))>, Requires<[HasV2TOnly]>; -def : Pat<(call tglobaladdr:$dst), - (CALL tglobaladdr:$dst)>, Requires<[HasV2TOnly]>; -def : Pat<(call texternalsym:$dst), - (CALL texternalsym:$dst)>, Requires<[HasV2TOnly]>; -//Tail calls. 
-def : Pat<(HexagonTCRet tglobaladdr:$dst), - (TCRETURNtg tglobaladdr:$dst)>; -def : Pat<(HexagonTCRet texternalsym:$dst), - (TCRETURNtext texternalsym:$dst)>; -def : Pat<(HexagonTCRet (i32 IntRegs:$dst)), - (TCRETURNR (i32 IntRegs:$dst))>; - -// Atomic load and store support -// 8 bit atomic load -def : Pat<(atomic_load_8 ADDRriS11_0:$src1), - (i32 (L2_loadrub_io AddrFI:$src1, 0))>; - -def : Pat<(atomic_load_8 (add (i32 IntRegs:$src1), s11_0ImmPred:$offset)), - (i32 (L2_loadrub_io (i32 IntRegs:$src1), s11_0ImmPred:$offset))>; +let isPseudo = 1, isCall = 1, isReturn = 1, isBarrier = 1, isPredicable = 0, + isTerminator = 1, isCodeGenOnly = 1 in +def TCRETURNi : JInst<(outs), (ins calltarget:$dst), "", []>; -// 16 bit atomic load -def : Pat<(atomic_load_16 ADDRriS11_1:$src1), - (i32 (L2_loadruh_io AddrFI:$src1, 0))>; - -def : Pat<(atomic_load_16 (add (i32 IntRegs:$src1), s11_1ImmPred:$offset)), - (i32 (L2_loadruh_io (i32 IntRegs:$src1), s11_1ImmPred:$offset))>; - -def : Pat<(atomic_load_32 ADDRriS11_2:$src1), - (i32 (LDriw ADDRriS11_2:$src1))>; - -def : Pat<(atomic_load_32 (add (i32 IntRegs:$src1), s11_2ImmPred:$offset)), - (i32 (LDriw_indexed (i32 IntRegs:$src1), s11_2ImmPred:$offset))>; - -// 64 bit atomic load -def : Pat<(atomic_load_64 ADDRriS11_3:$src1), - (i64 (LDrid ADDRriS11_3:$src1))>; - -def : Pat<(atomic_load_64 (add (i32 IntRegs:$src1), s11_3ImmPred:$offset)), - (i64 (LDrid_indexed (i32 IntRegs:$src1), s11_3ImmPred:$offset))>; - - -def : Pat<(atomic_store_8 ADDRriS11_0:$src2, (i32 IntRegs:$src1)), - (STrib ADDRriS11_0:$src2, (i32 IntRegs:$src1))>; - -def : Pat<(atomic_store_8 (add (i32 IntRegs:$src2), s11_0ImmPred:$offset), - (i32 IntRegs:$src1)), - (STrib_indexed (i32 IntRegs:$src2), s11_0ImmPred:$offset, - (i32 IntRegs:$src1))>; - - -def : Pat<(atomic_store_16 ADDRriS11_1:$src2, (i32 IntRegs:$src1)), - (STrih ADDRriS11_1:$src2, (i32 IntRegs:$src1))>; - -def : Pat<(atomic_store_16 (i32 IntRegs:$src1), - (add (i32 IntRegs:$src2), s11_1ImmPred:$offset)), - (STrih_indexed (i32 IntRegs:$src2), s11_1ImmPred:$offset, - (i32 IntRegs:$src1))>; - -def : Pat<(atomic_store_32 ADDRriS11_2:$src2, (i32 IntRegs:$src1)), - (STriw ADDRriS11_2:$src2, (i32 IntRegs:$src1))>; - -def : Pat<(atomic_store_32 (add (i32 IntRegs:$src2), s11_2ImmPred:$offset), - (i32 IntRegs:$src1)), - (STriw_indexed (i32 IntRegs:$src2), s11_2ImmPred:$offset, - (i32 IntRegs:$src1))>; - - - - -def : Pat<(atomic_store_64 ADDRriS11_3:$src2, (i64 DoubleRegs:$src1)), - (STrid ADDRriS11_3:$src2, (i64 DoubleRegs:$src1))>; - -def : Pat<(atomic_store_64 (add (i32 IntRegs:$src2), s11_3ImmPred:$offset), - (i64 DoubleRegs:$src1)), - (STrid_indexed (i32 IntRegs:$src2), s11_3ImmPred:$offset, - (i64 DoubleRegs:$src1))>; +//Tail calls. +def: Pat<(HexagonTCRet tglobaladdr:$dst), + (TCRETURNi tglobaladdr:$dst)>; +def: Pat<(HexagonTCRet texternalsym:$dst), + (TCRETURNi texternalsym:$dst)>; +def: Pat<(HexagonTCRet (i32 IntRegs:$dst)), + (TCRETURNr IntRegs:$dst)>; // Map from r0 = and(r1, 65535) to r0 = zxth(r1) -def : Pat <(and (i32 IntRegs:$src1), 65535), - (A2_zxth (i32 IntRegs:$src1))>; +def: Pat<(and (i32 IntRegs:$src1), 65535), + (A2_zxth IntRegs:$src1)>; // Map from r0 = and(r1, 255) to r0 = zxtb(r1). -def : Pat <(and (i32 IntRegs:$src1), 255), - (A2_zxtb (i32 IntRegs:$src1))>; +def: Pat<(and (i32 IntRegs:$src1), 255), + (A2_zxtb IntRegs:$src1)>; // Map Add(p1, true) to p1 = not(p1). // Add(p1, false) should never be produced, // if it does, it got to be mapped to NOOP. 
-def : Pat <(add (i1 PredRegs:$src1), -1), - (C2_not (i1 PredRegs:$src1))>; +def: Pat<(add (i1 PredRegs:$src1), -1), + (C2_not PredRegs:$src1)>; // Map from p0 = pnot(p0); r0 = mux(p0, #i, #j) => r0 = mux(p0, #j, #i). -def : Pat <(select (not (i1 PredRegs:$src1)), s8ImmPred:$src2, s8ImmPred:$src3), - (i32 (TFR_condset_ii (i1 PredRegs:$src1), s8ImmPred:$src3, - s8ImmPred:$src2))>; +def: Pat<(select (not (i1 PredRegs:$src1)), s8ImmPred:$src2, s32ImmPred:$src3), + (C2_muxii PredRegs:$src1, s32ImmPred:$src3, s8ImmPred:$src2)>; // Map from p0 = pnot(p0); r0 = select(p0, #i, r1) -// => r0 = TFR_condset_ri(p0, r1, #i) -def : Pat <(select (not (i1 PredRegs:$src1)), s12ImmPred:$src2, - (i32 IntRegs:$src3)), - (i32 (TFR_condset_ri (i1 PredRegs:$src1), (i32 IntRegs:$src3), - s12ImmPred:$src2))>; +// => r0 = C2_muxir(p0, r1, #i) +def: Pat<(select (not (i1 PredRegs:$src1)), s32ImmPred:$src2, + (i32 IntRegs:$src3)), + (C2_muxir PredRegs:$src1, IntRegs:$src3, s32ImmPred:$src2)>; // Map from p0 = pnot(p0); r0 = mux(p0, r1, #i) -// => r0 = TFR_condset_ir(p0, #i, r1) -def : Pat <(select (not (i1 PredRegs:$src1)), IntRegs:$src2, s12ImmPred:$src3), - (i32 (TFR_condset_ir (i1 PredRegs:$src1), s12ImmPred:$src3, - (i32 IntRegs:$src2)))>; +// => r0 = C2_muxri (p0, #i, r1) +def: Pat<(select (not (i1 PredRegs:$src1)), IntRegs:$src2, s32ImmPred:$src3), + (C2_muxri PredRegs:$src1, s32ImmPred:$src3, IntRegs:$src2)>; // Map from p0 = pnot(p0); if (p0) jump => if (!p0) jump. -def : Pat <(brcond (not (i1 PredRegs:$src1)), bb:$offset), - (J2_jumpf (i1 PredRegs:$src1), bb:$offset)>; - -// Map from p2 = pnot(p2); p1 = and(p0, p2) => p1 = and(p0, !p2). -def : Pat <(and (i1 PredRegs:$src1), (not (i1 PredRegs:$src2))), - (i1 (C2_andn (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>; - - -let AddedComplexity = 100 in -def : Pat <(i64 (zextloadi1 (HexagonCONST32 tglobaladdr:$global))), - (i64 (A2_combinew (A2_tfrsi 0), - (L2_loadrub_io (CONST32_set tglobaladdr:$global), 0)))>, - Requires<[NoV4T]>; - -// Map from i1 loads to 32 bits. This assumes that the i1* is byte aligned. -let AddedComplexity = 10 in -def : Pat <(i32 (zextloadi1 ADDRriS11_0:$addr)), - (i32 (A2_and (i32 (L2_loadrb_io AddrFI:$addr, 0)), (A2_tfrsi 0x1)))>; +def: Pat<(brcond (not (i1 PredRegs:$src1)), bb:$offset), + (J2_jumpf PredRegs:$src1, bb:$offset)>; // Map from Rdd = sign_extend_inreg(Rss, i32) -> Rdd = A2_sxtw(Rss.lo). -def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i32)), - (i64 (A2_sxtw (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg))))>; +def: Pat<(i64 (sext_inreg (i64 DoubleRegs:$src1), i32)), + (A2_sxtw (LoReg DoubleRegs:$src1))>; -// Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = A2_sxtw(SXTH(Rss.lo)). -def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i16)), - (i64 (A2_sxtw (i32 (A2_sxth (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), - subreg_loreg))))))>; +// Map from Rdd = sign_extend_inreg(Rss, i16) -> Rdd = A2_sxtw(A2_sxth(Rss.lo)). +def: Pat<(i64 (sext_inreg (i64 DoubleRegs:$src1), i16)), + (A2_sxtw (A2_sxth (LoReg DoubleRegs:$src1)))>; -// Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = A2_sxtw(SXTB(Rss.lo)). -def : Pat <(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)), - (i64 (A2_sxtw (i32 (A2_sxtb (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), - subreg_loreg))))))>; +// Map from Rdd = sign_extend_inreg(Rss, i8) -> Rdd = A2_sxtw(A2_sxtb(Rss.lo)). +def: Pat<(i64 (sext_inreg (i64 DoubleRegs:$src1), i8)), + (A2_sxtw (A2_sxtb (LoReg DoubleRegs:$src1)))>; // We want to prevent emitting pnot's as much as possible. 
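The rewrites above all fold a negation of an i1 predicate into the consumer instead of materializing a pnot. The underlying identities are plain one-bit arithmetic, checked below (illustrative; p is reduced to a single bit, and 'true' is the all-ones i1 value -1):

#include <assert.h>
#include <stdint.h>

static void check_i1_identities(int p, int32_t a, int32_t b) {
  int p1 = p & 1;
  /* add(p, true) == not(p): adding -1 flips a one-bit value. */
  assert(((p1 + -1) & 1) == (!p1 & 1));
  /* select(!p, a, b) == select(p, b, a): swap the mux operands. */
  assert((!p1 ? a : b) == (p1 ? b : a));
}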
// Map brcond with an unsupported setcc to a J2_jumpf. @@ -3787,144 +4980,68 @@ def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), s10ImmPred:$src2)), bb:$offset), (J2_jumpf (C2_cmpeqi (i32 IntRegs:$src1), s10ImmPred:$src2), bb:$offset)>; -def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset), - (J2_jumpf (i1 PredRegs:$src1), bb:$offset)>; +def: Pat<(brcond (i1 (setne (i1 PredRegs:$src1), (i1 -1))), bb:$offset), + (J2_jumpf PredRegs:$src1, bb:$offset)>; -def : Pat <(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset), - (J2_jumpt (i1 PredRegs:$src1), bb:$offset)>; +def: Pat<(brcond (i1 (setne (i1 PredRegs:$src1), (i1 0))), bb:$offset), + (J2_jumpt PredRegs:$src1, bb:$offset)>; // cmp.lt(Rs, Imm) -> !cmp.ge(Rs, Imm) -> !cmp.gt(Rs, Imm-1) -def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), - bb:$offset), - (J2_jumpf (C2_cmpgti (i32 IntRegs:$src1), - (DEC_CONST_SIGNED s8ImmPred:$src2)), bb:$offset)>; - -// cmp.lt(r0, r1) -> cmp.gt(r1, r0) -def : Pat <(brcond (i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), - bb:$offset), - (J2_jumpt (C2_cmpgt (i32 IntRegs:$src2), (i32 IntRegs:$src1)), bb:$offset)>; - -def : Pat <(brcond (i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - bb:$offset), - (J2_jumpf (C2_cmpgtup (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)), - bb:$offset)>; - -def : Pat <(brcond (i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))), - bb:$offset), - (J2_jumpf (C2_cmpgtu (i32 IntRegs:$src1), (i32 IntRegs:$src2)), - bb:$offset)>; - -def : Pat <(brcond (i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - bb:$offset), - (J2_jumpf (C2_cmpgtup (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), - bb:$offset)>; +def: Pat<(brcond (i1 (setlt (i32 IntRegs:$src1), s8ImmPred:$src2)), bb:$offset), + (J2_jumpf (C2_cmpgti IntRegs:$src1, (DEC_CONST_SIGNED s8ImmPred:$src2)), + bb:$offset)>; // Map from a 64-bit select to an emulated 64-bit mux. // Hexagon does not support 64-bit MUXes; so emulate with combines. -def : Pat <(select (i1 PredRegs:$src1), (i64 DoubleRegs:$src2), - (i64 DoubleRegs:$src3)), - (i64 (A2_combinew (i32 (C2_mux (i1 PredRegs:$src1), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), - subreg_hireg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3), - subreg_hireg)))), - (i32 (C2_mux (i1 PredRegs:$src1), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), - subreg_loreg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src3), - subreg_loreg))))))>; +def: Pat<(select (i1 PredRegs:$src1), (i64 DoubleRegs:$src2), + (i64 DoubleRegs:$src3)), + (A2_combinew (C2_mux PredRegs:$src1, (HiReg DoubleRegs:$src2), + (HiReg DoubleRegs:$src3)), + (C2_mux PredRegs:$src1, (LoReg DoubleRegs:$src2), + (LoReg DoubleRegs:$src3)))>; // Map from a 1-bit select to logical ops. // From LegalizeDAG.cpp: (B1 ? B2 : B3) <=> (B1 & B2)|(!B1&B3). -def : Pat <(select (i1 PredRegs:$src1), (i1 PredRegs:$src2), - (i1 PredRegs:$src3)), - (C2_or (C2_and (i1 PredRegs:$src1), (i1 PredRegs:$src2)), - (C2_and (C2_not (i1 PredRegs:$src1)), (i1 PredRegs:$src3)))>; - -// Map Pd = load(addr) -> Rs = load(addr); Pd = Rs. -def : Pat<(i1 (load ADDRriS11_2:$addr)), - (i1 (C2_tfrrp (i32 (L2_loadrb_io AddrFI:$addr, 0))))>; +def: Pat<(select (i1 PredRegs:$src1), (i1 PredRegs:$src2), (i1 PredRegs:$src3)), + (C2_or (C2_and PredRegs:$src1, PredRegs:$src2), + (C2_and (C2_not PredRegs:$src1), PredRegs:$src3))>; // Map for truncating from 64 immediates to 32 bit immediates. 
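Since there is no 64-bit mux, the 64-bit select pattern above splits both operands into high and low words, muxes each half on the same predicate, and recombines the results. A C model of that expansion (illustrative; the comments name the instructions the pattern emits):

#include <stdint.h>

static uint64_t select64(int p, uint64_t a, uint64_t b) {
  uint32_t hi = p ? (uint32_t)(a >> 32) : (uint32_t)(b >> 32); /* C2_mux of HiRegs */
  uint32_t lo = p ? (uint32_t)a         : (uint32_t)b;         /* C2_mux of LoRegs */
  return ((uint64_t)hi << 32) | lo;                            /* A2_combinew      */
}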
-def : Pat<(i32 (trunc (i64 DoubleRegs:$src))), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), subreg_loreg))>; +def: Pat<(i32 (trunc (i64 DoubleRegs:$src))), + (LoReg DoubleRegs:$src)>; // Map for truncating from i64 immediates to i1 bit immediates. -def : Pat<(i1 (trunc (i64 DoubleRegs:$src))), - (i1 (C2_tfrrp (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), - subreg_loreg))))>; - -// Map memb(Rs) = Rdd -> memb(Rs) = Rt. -def : Pat<(truncstorei8 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), - (STrib ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), - subreg_loreg)))>; - -// Map memh(Rs) = Rdd -> memh(Rs) = Rt. -def : Pat<(truncstorei16 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), - (STrih ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), - subreg_loreg)))>; -// Map memw(Rs) = Rdd -> memw(Rs) = Rt -def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), - (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), - subreg_loreg)))>; - -// Map memw(Rs) = Rdd -> memw(Rs) = Rt. -def : Pat<(truncstorei32 (i64 DoubleRegs:$src), ADDRriS11_0:$addr), - (STriw ADDRriS11_0:$addr, (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src), - subreg_loreg)))>; - -// Map from i1 = constant<-1>; memw(addr) = i1 -> r0 = 1; memw(addr) = r0. -def : Pat<(store (i1 -1), ADDRriS11_2:$addr), - (STrib ADDRriS11_2:$addr, (A2_tfrsi 1))>; - - -// Map from i1 = constant<-1>; store i1 -> r0 = 1; store r0. -def : Pat<(store (i1 -1), ADDRriS11_2:$addr), - (STrib ADDRriS11_2:$addr, (A2_tfrsi 1))>; - -// Map from memb(Rs) = Pd -> Rt = mux(Pd, #0, #1); store Rt. -def : Pat<(store (i1 PredRegs:$src1), ADDRriS11_2:$addr), - (STrib ADDRriS11_2:$addr, (i32 (C2_muxii (i1 PredRegs:$src1), 1, 0)) )>; - -// Map Rdd = anyext(Rs) -> Rdd = A2_sxtw(Rs). -// Hexagon_TODO: We can probably use combine but that will cost 2 instructions. -// Better way to do this? -def : Pat<(i64 (anyext (i32 IntRegs:$src1))), - (i64 (A2_sxtw (i32 IntRegs:$src1)))>; - -// Map cmple -> cmpgt. +def: Pat<(i1 (trunc (i64 DoubleRegs:$src))), + (C2_tfrrp (LoReg DoubleRegs:$src))>; + // rs <= rt -> !(rs > rt). -def : Pat<(i1 (setle (i32 IntRegs:$src1), s10ExtPred:$src2)), - (i1 (C2_not (C2_cmpgti (i32 IntRegs:$src1), s10ExtPred:$src2)))>; +let AddedComplexity = 30 in +def: Pat<(i1 (setle (i32 IntRegs:$src1), s32ImmPred:$src2)), + (C2_not (C2_cmpgti IntRegs:$src1, s32ImmPred:$src2))>; // rs <= rt -> !(rs > rt). def : Pat<(i1 (setle (i32 IntRegs:$src1), (i32 IntRegs:$src2))), (i1 (C2_not (C2_cmpgt (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>; // Rss <= Rtt -> !(Rss > Rtt). -def : Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - (i1 (C2_not (C2_cmpgtp (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>; +def: Pat<(i1 (setle (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (C2_not (C2_cmpgtp DoubleRegs:$src1, DoubleRegs:$src2))>; // Map cmpne -> cmpeq. // Hexagon_TODO: We should improve on this. // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne (i32 IntRegs:$src1), s10ExtPred:$src2)), - (i1 (C2_not(i1 (C2_cmpeqi (i32 IntRegs:$src1), s10ExtPred:$src2))))>; - -// Map cmpne(Rs) -> !cmpeqe(Rs). -// rs != rt -> !(rs == rt). -def : Pat <(i1 (setne (i32 IntRegs:$src1), (i32 IntRegs:$src2))), - (i1 (C2_not (i1 (C2_cmpeq (i32 IntRegs:$src1), (i32 IntRegs:$src2)))))>; +let AddedComplexity = 30 in +def: Pat<(i1 (setne (i32 IntRegs:$src1), s32ImmPred:$src2)), + (C2_not (C2_cmpeqi IntRegs:$src1, s32ImmPred:$src2))>; // Convert setne back to xor for hexagon since we compute w/ pred registers. 
-def : Pat <(i1 (setne (i1 PredRegs:$src1), (i1 PredRegs:$src2))), - (i1 (C2_xor (i1 PredRegs:$src1), (i1 PredRegs:$src2)))>; +def: Pat<(i1 (setne (i1 PredRegs:$src1), (i1 PredRegs:$src2))), + (C2_xor PredRegs:$src1, PredRegs:$src2)>; // Map cmpne(Rss) -> !cmpew(Rss). // rs != rt -> !(rs == rt). -def : Pat <(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - (i1 (C2_not (i1 (C2_cmpeqp (i64 DoubleRegs:$src1), - (i64 DoubleRegs:$src2)))))>; +def: Pat<(i1 (setne (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (C2_not (C2_cmpeqp DoubleRegs:$src1, DoubleRegs:$src2))>; // Map cmpge(Rs, Rt) -> !(cmpgt(Rs, Rt). // rs >= rt -> !(rt > rs). @@ -3932,366 +5049,120 @@ def : Pat <(i1 (setge (i32 IntRegs:$src1), (i32 IntRegs:$src2))), (i1 (C2_not (i1 (C2_cmpgt (i32 IntRegs:$src2), (i32 IntRegs:$src1)))))>; // cmpge(Rs, Imm) -> cmpgt(Rs, Imm-1) -def : Pat <(i1 (setge (i32 IntRegs:$src1), s8ExtPred:$src2)), - (i1 (C2_cmpgti (i32 IntRegs:$src1), (DEC_CONST_SIGNED s8ExtPred:$src2)))>; +let AddedComplexity = 30 in +def: Pat<(i1 (setge (i32 IntRegs:$src1), s32ImmPred:$src2)), + (C2_cmpgti IntRegs:$src1, (DEC_CONST_SIGNED s32ImmPred:$src2))>; // Map cmpge(Rss, Rtt) -> !cmpgt(Rtt, Rss). // rss >= rtt -> !(rtt > rss). -def : Pat <(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - (i1 (C2_not (i1 (C2_cmpgtp (i64 DoubleRegs:$src2), - (i64 DoubleRegs:$src1)))))>; +def: Pat<(i1 (setge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (C2_not (C2_cmpgtp DoubleRegs:$src2, DoubleRegs:$src1))>; // Map cmplt(Rs, Imm) -> !cmpge(Rs, Imm). // !cmpge(Rs, Imm) -> !cmpgt(Rs, Imm-1). // rs < rt -> !(rs >= rt). -def : Pat <(i1 (setlt (i32 IntRegs:$src1), s8ExtPred:$src2)), - (i1 (C2_not (C2_cmpgti (i32 IntRegs:$src1), (DEC_CONST_SIGNED s8ExtPred:$src2))))>; - -// Map cmplt(Rs, Rt) -> cmpgt(Rt, Rs). -// rs < rt -> rt > rs. -// We can let assembler map it, or we can do in the compiler itself. -def : Pat <(i1 (setlt (i32 IntRegs:$src1), (i32 IntRegs:$src2))), - (i1 (C2_cmpgt (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>; - -// Map cmplt(Rss, Rtt) -> cmpgt(Rtt, Rss). -// rss < rtt -> (rtt > rss). -def : Pat <(i1 (setlt (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - (i1 (C2_cmpgtp (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>; - -// Map from cmpltu(Rs, Rd) -> cmpgtu(Rd, Rs) -// rs < rt -> rt > rs. -// We can let assembler map it, or we can do in the compiler itself. -def : Pat <(i1 (setult (i32 IntRegs:$src1), (i32 IntRegs:$src2))), - (i1 (C2_cmpgtu (i32 IntRegs:$src2), (i32 IntRegs:$src1)))>; - -// Map from cmpltu(Rss, Rdd) -> cmpgtu(Rdd, Rss). -// rs < rt -> rt > rs. 
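The compare mappings above all rest on the same identities: with only cmp.gt and cmp.eq available, the remaining signed orderings are produced by negating a compare and, for immediate forms, decrementing the constant via DEC_CONST_SIGNED. A compact check (illustrative; it assumes c - 1 does not wrap, i.e. c is not the most negative value):

#include <assert.h>
#include <stdint.h>

static void check_signed_compare_rewrites(int32_t x, int32_t c) {
  assert((x >= c) == (x > c - 1));    /* cmpge(Rs,#c) -> cmpgt(Rs,#c-1)  */
  assert((x <  c) == !(x > c - 1));   /* cmplt(Rs,#c) -> !cmpgt(Rs,#c-1) */
  assert((x <= c) == !(x > c));       /* cmple(Rs,#c) -> !cmpgt(Rs,#c)   */
  assert((x != c) == !(x == c));      /* cmpne        -> !cmpeq          */
}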
-def : Pat <(i1 (setult (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - (i1 (C2_cmpgtup (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1)))>; +let AddedComplexity = 30 in +def: Pat<(i1 (setlt (i32 IntRegs:$src1), s32ImmPred:$src2)), + (C2_not (C2_cmpgti IntRegs:$src1, + (DEC_CONST_SIGNED s32ImmPred:$src2)))>; // Generate cmpgeu(Rs, #0) -> cmpeq(Rs, Rs) -def : Pat <(i1 (setuge (i32 IntRegs:$src1), 0)), - (i1 (C2_cmpeq (i32 IntRegs:$src1), (i32 IntRegs:$src1)))>; +def: Pat<(i1 (setuge (i32 IntRegs:$src1), 0)), + (C2_cmpeq IntRegs:$src1, IntRegs:$src1)>; // Generate cmpgeu(Rs, #u8) -> cmpgtu(Rs, #u8 -1) -def : Pat <(i1 (setuge (i32 IntRegs:$src1), u8ExtPred:$src2)), - (i1 (C2_cmpgtui (i32 IntRegs:$src1), (DEC_CONST_UNSIGNED u8ExtPred:$src2)))>; +def: Pat<(i1 (setuge (i32 IntRegs:$src1), u32ImmPred:$src2)), + (C2_cmpgtui IntRegs:$src1, (DEC_CONST_UNSIGNED u32ImmPred:$src2))>; // Generate cmpgtu(Rs, #u9) -def : Pat <(i1 (setugt (i32 IntRegs:$src1), u9ExtPred:$src2)), - (i1 (C2_cmpgtui (i32 IntRegs:$src1), u9ExtPred:$src2))>; - -// Map from Rs >= Rt -> !(Rt > Rs). -// rs >= rt -> !(rt > rs). -def : Pat <(i1 (setuge (i32 IntRegs:$src1), (i32 IntRegs:$src2))), - (i1 (C2_not (C2_cmpgtu (i32 IntRegs:$src2), (i32 IntRegs:$src1))))>; +def: Pat<(i1 (setugt (i32 IntRegs:$src1), u32ImmPred:$src2)), + (C2_cmpgtui IntRegs:$src1, u32ImmPred:$src2)>; // Map from Rs >= Rt -> !(Rt > Rs). // rs >= rt -> !(rt > rs). -def : Pat <(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - (i1 (C2_not (C2_cmpgtup (i64 DoubleRegs:$src2), (i64 DoubleRegs:$src1))))>; - -// Map from cmpleu(Rs, Rt) -> !cmpgtu(Rs, Rt). -// Map from (Rs <= Rt) -> !(Rs > Rt). -def : Pat <(i1 (setule (i32 IntRegs:$src1), (i32 IntRegs:$src2))), - (i1 (C2_not (C2_cmpgtu (i32 IntRegs:$src1), (i32 IntRegs:$src2))))>; +def: Pat<(i1 (setuge (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (C2_not (C2_cmpgtup DoubleRegs:$src2, DoubleRegs:$src1))>; // Map from cmpleu(Rss, Rtt) -> !cmpgtu(Rss, Rtt-1). // Map from (Rs <= Rt) -> !(Rs > Rt). -def : Pat <(i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), - (i1 (C2_not (C2_cmpgtup (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))))>; +def: Pat<(i1 (setule (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2))), + (C2_not (C2_cmpgtup DoubleRegs:$src1, DoubleRegs:$src2))>; // Sign extends. // i1 -> i32 -def : Pat <(i32 (sext (i1 PredRegs:$src1))), - (i32 (C2_muxii (i1 PredRegs:$src1), -1, 0))>; +def: Pat<(i32 (sext (i1 PredRegs:$src1))), + (C2_muxii PredRegs:$src1, -1, 0)>; // i1 -> i64 -def : Pat <(i64 (sext (i1 PredRegs:$src1))), - (i64 (A2_combinew (A2_tfrsi -1), (C2_muxii (i1 PredRegs:$src1), -1, 0)))>; - -// Convert sign-extended load back to load and sign extend. -// i8 -> i64 -def: Pat <(i64 (sextloadi8 ADDRriS11_0:$src1)), - (i64 (A2_sxtw (L2_loadrb_io AddrFI:$src1, 0)))>; - -// Convert any-extended load back to load and sign extend. -// i8 -> i64 -def: Pat <(i64 (extloadi8 ADDRriS11_0:$src1)), - (i64 (A2_sxtw (L2_loadrb_io AddrFI:$src1, 0)))>; - -// Convert sign-extended load back to load and sign extend. -// i16 -> i64 -def: Pat <(i64 (sextloadi16 ADDRriS11_1:$src1)), - (i64 (A2_sxtw (L2_loadrh_io AddrFI:$src1, 0)))>; - -// Convert sign-extended load back to load and sign extend. -// i32 -> i64 -def: Pat <(i64 (sextloadi32 ADDRriS11_2:$src1)), - (i64 (A2_sxtw (LDriw ADDRriS11_2:$src1)))>; - +def: Pat<(i64 (sext (i1 PredRegs:$src1))), + (A2_combinew (A2_tfrsi -1), (C2_muxii PredRegs:$src1, -1, 0))>; // Zero extends. 
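The unsigned compare rewrites above follow the same scheme: x >= #0 is always true for an unsigned x (hence the cmpeq(Rs,Rs) trick), and x >= #u becomes x > #u-1 through DEC_CONST_UNSIGNED. A quick check of those identities (illustrative; the decrement form needs u > 0, which the separate #0 pattern covers):

#include <assert.h>
#include <stdint.h>

static void check_unsigned_compare_rewrites(uint32_t x, uint32_t u) {
  assert((x >= 0u) == 1);              /* cmpgeu(Rs,#0) -> cmpeq(Rs,Rs)    */
  if (u != 0u)
    assert((x >= u) == (x > u - 1u));  /* cmpgeu(Rs,#u) -> cmpgtu(Rs,#u-1) */
  assert((x <= u) == !(x > u));        /* cmpleu        -> !cmpgtu         */
}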
// i1 -> i32 -def : Pat <(i32 (zext (i1 PredRegs:$src1))), - (i32 (C2_muxii (i1 PredRegs:$src1), 1, 0))>; - -// i1 -> i64 -def : Pat <(i64 (zext (i1 PredRegs:$src1))), - (i64 (A2_combinew (A2_tfrsi 0), (C2_muxii (i1 PredRegs:$src1), 1, 0)))>, - Requires<[NoV4T]>; - -// i32 -> i64 -def : Pat <(i64 (zext (i32 IntRegs:$src1))), - (i64 (A2_combinew (A2_tfrsi 0), (i32 IntRegs:$src1)))>, - Requires<[NoV4T]>; - -// i8 -> i64 -def: Pat <(i64 (zextloadi8 ADDRriS11_0:$src1)), - (i64 (A2_combinew (A2_tfrsi 0), (L2_loadrub_io AddrFI:$src1, 0)))>, - Requires<[NoV4T]>; - -let AddedComplexity = 20 in -def: Pat <(i64 (zextloadi8 (add (i32 IntRegs:$src1), - s11_0ExtPred:$offset))), - (i64 (A2_combinew (A2_tfrsi 0), (L2_loadrub_io IntRegs:$src1, - s11_0ExtPred:$offset)))>, - Requires<[NoV4T]>; - -// i1 -> i64 -def: Pat <(i64 (zextloadi1 ADDRriS11_0:$src1)), - (i64 (A2_combinew (A2_tfrsi 0), (L2_loadrub_io AddrFI:$src1, 0)))>, - Requires<[NoV4T]>; - -let AddedComplexity = 20 in -def: Pat <(i64 (zextloadi1 (add (i32 IntRegs:$src1), - s11_0ExtPred:$offset))), - (i64 (A2_combinew (A2_tfrsi 0), (L2_loadrub_io IntRegs:$src1, - s11_0ExtPred:$offset)))>, - Requires<[NoV4T]>; - -// i16 -> i64 -def: Pat <(i64 (zextloadi16 ADDRriS11_1:$src1)), - (i64 (A2_combinew (A2_tfrsi 0), (L2_loadruh_io AddrFI:$src1, 0)))>, - Requires<[NoV4T]>; - -let AddedComplexity = 20 in -def: Pat <(i64 (zextloadi16 (add (i32 IntRegs:$src1), - s11_1ExtPred:$offset))), - (i64 (A2_combinew (A2_tfrsi 0), (L2_loadruh_io IntRegs:$src1, - s11_1ExtPred:$offset)))>, - Requires<[NoV4T]>; - -// i32 -> i64 -def: Pat <(i64 (zextloadi32 ADDRriS11_2:$src1)), - (i64 (A2_combinew (A2_tfrsi 0), (LDriw ADDRriS11_2:$src1)))>, - Requires<[NoV4T]>; - -let AddedComplexity = 100 in -def: Pat <(i64 (zextloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))), - (i64 (A2_combinew (A2_tfrsi 0), (LDriw_indexed IntRegs:$src1, - s11_2ExtPred:$offset)))>, - Requires<[NoV4T]>; - -let AddedComplexity = 10 in -def: Pat <(i32 (zextloadi1 ADDRriS11_0:$src1)), - (i32 (LDriw ADDRriS11_0:$src1))>; - -// Map from Rs = Pd to Pd = mux(Pd, #1, #0) -def : Pat <(i32 (zext (i1 PredRegs:$src1))), - (i32 (C2_muxii (i1 PredRegs:$src1), 1, 0))>; +def: Pat<(i32 (zext (i1 PredRegs:$src1))), + (C2_muxii PredRegs:$src1, 1, 0)>; // Map from Rs = Pd to Pd = mux(Pd, #1, #0) -def : Pat <(i32 (anyext (i1 PredRegs:$src1))), - (i32 (C2_muxii (i1 PredRegs:$src1), 1, 0))>; - -// Map from Rss = Pd to Rdd = A2_sxtw (mux(Pd, #1, #0)) -def : Pat <(i64 (anyext (i1 PredRegs:$src1))), - (i64 (A2_sxtw (i32 (C2_muxii (i1 PredRegs:$src1), 1, 0))))>; - - -let AddedComplexity = 100 in -def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), - (i32 32))), - (i64 (zextloadi32 (i32 (add IntRegs:$src2, - s11_2ExtPred:$offset2)))))), - (i64 (A2_combinew (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), - (LDriw_indexed IntRegs:$src2, - s11_2ExtPred:$offset2)))>; - -def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), - (i32 32))), - (i64 (zextloadi32 ADDRriS11_2:$srcLow)))), - (i64 (A2_combinew (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), - (LDriw ADDRriS11_2:$srcLow)))>; - -def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), - (i32 32))), - (i64 (zext (i32 IntRegs:$srcLow))))), - (i64 (A2_combinew (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), - IntRegs:$srcLow))>; - -let AddedComplexity = 100 in -def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), - (i32 32))), - (i64 (zextloadi32 (i32 (add IntRegs:$src2, - s11_2ExtPred:$offset2)))))), - (i64 (A2_combinew (EXTRACT_SUBREG (i64 
DoubleRegs:$srcHigh), subreg_loreg), - (LDriw_indexed IntRegs:$src2, - s11_2ExtPred:$offset2)))>; - -def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), - (i32 32))), - (i64 (zextloadi32 ADDRriS11_2:$srcLow)))), - (i64 (A2_combinew (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), - (LDriw ADDRriS11_2:$srcLow)))>; - -def: Pat<(i64 (or (i64 (shl (i64 DoubleRegs:$srcHigh), - (i32 32))), - (i64 (zext (i32 IntRegs:$srcLow))))), - (i64 (A2_combinew (EXTRACT_SUBREG (i64 DoubleRegs:$srcHigh), subreg_loreg), - IntRegs:$srcLow))>; - -// Any extended 64-bit load. -// anyext i32 -> i64 -def: Pat <(i64 (extloadi32 ADDRriS11_2:$src1)), - (i64 (A2_combinew (A2_tfrsi 0), (LDriw ADDRriS11_2:$src1)))>, - Requires<[NoV4T]>; - -// When there is an offset we should prefer the pattern below over the pattern above. -// The complexity of the above is 13 (gleaned from HexagonGenDAGIsel.inc) -// So this complexity below is comfortably higher to allow for choosing the below. -// If this is not done then we generate addresses such as -// ******************************************** -// r1 = add (r0, #4) -// r1 = memw(r1 + #0) -// instead of -// r1 = memw(r0 + #4) -// ******************************************** -let AddedComplexity = 100 in -def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))), - (i64 (A2_combinew (A2_tfrsi 0), (LDriw_indexed IntRegs:$src1, - s11_2ExtPred:$offset)))>, - Requires<[NoV4T]>; - -// anyext i16 -> i64. -def: Pat <(i64 (extloadi16 ADDRriS11_2:$src1)), - (i64 (A2_combinew (A2_tfrsi 0), (L2_loadrh_io AddrFI:$src1, 0)))>, - Requires<[NoV4T]>; - -let AddedComplexity = 20 in -def: Pat <(i64 (extloadi16 (add (i32 IntRegs:$src1), - s11_1ExtPred:$offset))), - (i64 (A2_combinew (A2_tfrsi 0), (L2_loadrh_io IntRegs:$src1, - s11_1ExtPred:$offset)))>, - Requires<[NoV4T]>; +def: Pat<(i32 (anyext (i1 PredRegs:$src1))), + (C2_muxii PredRegs:$src1, 1, 0)>; -// Map from Rdd = zxtw(Rs) -> Rdd = combine(0, Rs). -def : Pat<(i64 (zext (i32 IntRegs:$src1))), - (i64 (A2_combinew (A2_tfrsi 0), (i32 IntRegs:$src1)))>, - Requires<[NoV4T]>; +// Map from Rss = Pd to Rdd = sxtw (mux(Pd, #1, #0)) +def: Pat<(i64 (anyext (i1 PredRegs:$src1))), + (A2_sxtw (C2_muxii PredRegs:$src1, 1, 0))>; // Multiply 64-bit unsigned and use upper result. def : Pat <(mulhu (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), - (i64 - (M2_dpmpyuu_acc_s0 - (i64 - (A2_combinew - (A2_tfrsi 0), - (i32 - (EXTRACT_SUBREG - (i64 - (S2_lsr_i_p - (i64 - (M2_dpmpyuu_acc_s0 - (i64 - (M2_dpmpyuu_acc_s0 - (i64 - (A2_combinew (A2_tfrsi 0), - (i32 - (EXTRACT_SUBREG - (i64 - (S2_lsr_i_p - (i64 - (M2_dpmpyuu_s0 - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), - subreg_loreg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), - subreg_loreg)))), 32)), - subreg_loreg)))), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))), - 32)), subreg_loreg)))), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>; - -// Multiply 64-bit signed and use upper result. 
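The mulhu pattern above computes the upper 64 bits of a 64x64 unsigned product from four 32x32 partial products, folding the carries through 64-bit accumulating multiplies and adds. A C model that mirrors the structure of the selected sequence (illustrative; the comments name the corresponding Hexagon operations):

#include <stdint.h>

static uint64_t mulhu64(uint64_t a, uint64_t b) {
  uint64_t aL = (uint32_t)a, aH = a >> 32;
  uint64_t bL = (uint32_t)b, bH = b >> 32;

  uint64_t ll = aL * bL;                      /* M2_dpmpyuu_s0(LoReg, LoReg)         */
  uint64_t lh = aL * bH;                      /* M2_dpmpyuu_s0(LoReg, HiReg)         */

  uint64_t mid = (ll >> 32) + aH * bL;        /* M2_dpmpyuu_acc_s0 with HiReg, LoReg */
  mid += (uint32_t)lh;                        /* A2_addp with combinew(0, LoReg(lh)) */

  return (mid >> 32) + aH * bH + (lh >> 32);  /* acc HiReg*HiReg, final A2_addp      */
}

Each partial sum stays within 64 bits, so no explicit carry propagation is needed.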
-def : Pat <(mulhs (i64 DoubleRegs:$src1), (i64 DoubleRegs:$src2)), - (i64 - (M2_dpmpyss_acc_s0 - (i64 - (A2_combinew (A2_tfrsi 0), - (i32 - (EXTRACT_SUBREG - (i64 - (S2_lsr_i_p - (i64 - (M2_dpmpyss_acc_s0 - (i64 - (M2_dpmpyss_acc_s0 - (i64 - (A2_combinew (A2_tfrsi 0), - (i32 - (EXTRACT_SUBREG - (i64 - (S2_lsr_i_p - (i64 - (M2_dpmpyuu_s0 - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), - subreg_loreg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), - subreg_loreg)))), 32)), - subreg_loreg)))), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_loreg)))), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_loreg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg)))), - 32)), subreg_loreg)))), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src1), subreg_hireg)), - (i32 (EXTRACT_SUBREG (i64 DoubleRegs:$src2), subreg_hireg))))>; + (A2_addp + (M2_dpmpyuu_acc_s0 + (S2_lsr_i_p + (A2_addp + (M2_dpmpyuu_acc_s0 + (S2_lsr_i_p (M2_dpmpyuu_s0 (LoReg $src1), (LoReg $src2)), 32), + (HiReg $src1), + (LoReg $src2)), + (A2_combinew (A2_tfrsi 0), + (LoReg (M2_dpmpyuu_s0 (LoReg $src1), (HiReg $src2))))), + 32), + (HiReg $src1), + (HiReg $src2)), + (S2_lsr_i_p (M2_dpmpyuu_s0 (LoReg $src1), (HiReg $src2)), 32) +)>; // Hexagon specific ISD nodes. -//def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>]>; -def SDTHexagonADJDYNALLOC : SDTypeProfile<1, 2, - [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; -def Hexagon_ADJDYNALLOC : SDNode<"HexagonISD::ADJDYNALLOC", - SDTHexagonADJDYNALLOC>; -// Needed to tag these instructions for stack layout. -let usesCustomInserter = 1 in -def ADJDYNALLOC : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1, - s16Imm:$src2), - "$dst = add($src1, #$src2)", - [(set (i32 IntRegs:$dst), - (Hexagon_ADJDYNALLOC (i32 IntRegs:$src1), - s16ImmPred:$src2))]>; +def SDTHexagonALLOCA : SDTypeProfile<1, 2, + [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; +def HexagonALLOCA : SDNode<"HexagonISD::ALLOCA", SDTHexagonALLOCA, + [SDNPHasChain]>; + +// The reason for the custom inserter is to record all ALLOCA instructions +// in MachineFunctionInfo. 
+let Defs = [R29], isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 1,
+    usesCustomInserter = 1 in
+def ALLOCA: ALU32Inst<(outs IntRegs:$Rd),
+      (ins IntRegs:$Rs, u32Imm:$A), "",
+      [(set (i32 IntRegs:$Rd),
+            (HexagonALLOCA (i32 IntRegs:$Rs), (i32 imm:$A)))]>;
+
+let isCodeGenOnly = 1, isPseudo = 1, Uses = [R30], hasSideEffects = 0 in
+def ALIGNA : ALU32Inst<(outs IntRegs:$Rd), (ins u32Imm:$A), "", []>;

 def SDTHexagonARGEXTEND : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>;
 def Hexagon_ARGEXTEND : SDNode<"HexagonISD::ARGEXTEND", SDTHexagonARGEXTEND>;
+let isCodeGenOnly = 1 in
 def ARGEXTEND : ALU32_rr <(outs IntRegs:$dst), (ins IntRegs:$src1),
                 "$dst = $src1",
                 [(set (i32 IntRegs:$dst),
                       (Hexagon_ARGEXTEND (i32 IntRegs:$src1)))]>;

 let AddedComplexity = 100 in
-def : Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)),
-          (COPY (i32 IntRegs:$src1))>;
+def: Pat<(i32 (sext_inreg (Hexagon_ARGEXTEND (i32 IntRegs:$src1)), i16)),
+        (i32 IntRegs:$src1)>;

-def HexagonWrapperJT: SDNode<"HexagonISD::WrapperJT", SDTIntUnaryOp>;
+def HexagonJT: SDNode<"HexagonISD::JT", SDTIntUnaryOp>;
+def HexagonCP: SDNode<"HexagonISD::CP", SDTIntUnaryOp>;

-def : Pat<(HexagonWrapperJT tjumptable:$dst),
-          (i32 (CONST32_set_jt tjumptable:$dst))>;
+def: Pat<(HexagonJT tjumptable:$dst), (A2_tfrsi s16Ext:$dst)>;
+def: Pat<(HexagonCP tconstpool:$dst), (A2_tfrsi s16Ext:$dst)>;

 // XTYPE/SHIFT
 //
@@ -4441,7 +5312,6 @@ let AddedComplexity = 100 in
   defm _xacc : xtype_imm_base< opc1, "^= ", OpNode, xor, 0b100, minOp>;
 }

-let isCodeGenOnly = 0 in {
 defm S2_asr : xtype_imm_acc<"asr", sra, 0b00>;

 defm S2_lsr : xtype_imm_acc<"lsr", srl, 0b01>,
@@ -4449,7 +5319,6 @@ defm S2_lsr : xtype_imm_acc<"lsr", srl, 0b01>,

 defm S2_asl : xtype_imm_acc<"asl", shl, 0b10>,
               xtype_xor_imm_acc<"asl", shl, 0b10>;
-}

 multiclass xtype_reg_acc_r<string opc1, SDNode OpNode, bits<2>minOp> {
   let AddedComplexity = 100 in
@@ -4475,12 +5344,10 @@ multiclass xtype_reg_acc minOp > {
   defm _r_p : xtype_reg_acc_p ;
 }

-let isCodeGenOnly = 0 in {
 defm S2_asl : xtype_reg_acc<"asl", shl, 0b10>;
 defm S2_asr : xtype_reg_acc<"asr", sra, 0b00>;
 defm S2_lsr : xtype_reg_acc<"lsr", srl, 0b01>;
 defm S2_lsl : xtype_reg_acc<"lsl", shl, 0b11>;
-}

 //===----------------------------------------------------------------------===//
 let hasSideEffects = 0 in
@@ -4511,9 +5378,42 @@ class T_S3op_64 MajOp, bits<3> MinOp, bit SwapOps,
   : T_S3op_1 ;

-let isCodeGenOnly = 0 in
+let Itinerary = S_3op_tc_1_SLOT23 in {
+  def S2_shuffeb : T_S3op_64 < "shuffeb", 0b00, 0b010, 0>;
+  def S2_shuffeh : T_S3op_64 < "shuffeh", 0b00, 0b110, 0>;
+  def S2_shuffob : T_S3op_64 < "shuffob", 0b00, 0b100, 1>;
+  def S2_shuffoh : T_S3op_64 < "shuffoh", 0b10, 0b000, 1>;
+
+  def S2_vtrunewh : T_S3op_64 < "vtrunewh", 0b10, 0b010, 0>;
+  def S2_vtrunowh : T_S3op_64 < "vtrunowh", 0b10, 0b100, 0>;
+}
+
 def S2_lfsp : T_S3op_64 < "lfs", 0b10, 0b110, 0>;

+let hasSideEffects = 0 in
+class T_S3op_2 <string mnemonic, bits<3> MajOp, bit SwapOps>
+  : SInst < (outs DoubleRegs:$Rdd),
+            (ins DoubleRegs:$Rss, DoubleRegs:$Rtt, PredRegs:$Pu),
+  "$Rdd = "#mnemonic#"($Rss, $Rtt, $Pu)",
+  [], "", S_3op_tc_1_SLOT23 > {
+    bits<5> Rdd;
+    bits<5> Rss;
+    bits<5> Rtt;
+    bits<2> Pu;
+
+    let IClass = 0b1100;
+
+    let Inst{27-24} = 0b0010;
+    let Inst{23-21} = MajOp;
+    let Inst{20-16} = !if (SwapOps, Rtt, Rss);
+    let Inst{12-8} = !if (SwapOps, Rss, Rtt);
+    let Inst{6-5} = Pu;
+    let Inst{4-0} = Rdd;
+  }
+
+def S2_valignrb : T_S3op_2 < "valignb", 0b000, 1>;
+def S2_vsplicerb : T_S3op_2 < "vspliceb", 0b100, 0>;
+
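To make the encoding above concrete, the field assignments in T_S3op_2 can be written out as plain C++. This is only an illustration of how the TableGen bit slices compose into the final 32-bit word; the real encoder is generated from the TableGen bits, not hand-written like this.

#include <cstdint>

// Mirrors the field layout of T_S3op_2: IClass 1100 in bits 31-28,
// Inst{27-24} = 0010, MajOp in 23-21, first source register pair in 20-16,
// second in 12-8, Pu in 6-5, Rdd in 4-0. With SwapOps = 1 (S2_valignrb),
// Rtt lands in bits 20-16 and Rss in bits 12-8.
static uint32_t encodeT_S3op_2(unsigned MajOp, bool SwapOps, unsigned Rss,
                               unsigned Rtt, unsigned Pu, unsigned Rdd) {
  uint32_t Inst = 0;
  Inst |= 0b1100u << 28;                          // IClass
  Inst |= 0b0010u << 24;                          // Inst{27-24}
  Inst |= (MajOp & 0x7u) << 21;                   // Inst{23-21}
  Inst |= ((SwapOps ? Rtt : Rss) & 0x1Fu) << 16;  // Inst{20-16}
  Inst |= ((SwapOps ? Rss : Rtt) & 0x1Fu) << 8;   // Inst{12-8}
  Inst |= (Pu & 0x3u) << 5;                       // Inst{6-5}
  Inst |= (Rdd & 0x1Fu);                          // Inst{4-0}
  return Inst;
}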
 //===----------------------------------------------------------------------===//
 // Template class used by vector shift, vector rotate, vector neg,
 // 32-bit shift, 64-bit shifts, etc.
@@ -4564,31 +5464,78 @@ class T_S3op_shiftVect MajOp, bits<2> MinOp>

 // Shift by register
 // Rdd=[asr|lsr|asl|lsl](Rss,Rt)

-let isCodeGenOnly = 0 in {
 def S2_asr_r_p : T_S3op_shift64 < "asr", sra, 0b00>;
 def S2_lsr_r_p : T_S3op_shift64 < "lsr", srl, 0b01>;
 def S2_asl_r_p : T_S3op_shift64 < "asl", shl, 0b10>;
 def S2_lsl_r_p : T_S3op_shift64 < "lsl", shl, 0b11>;
-}

 // Rd=[asr|lsr|asl|lsl](Rs,Rt)

-let isCodeGenOnly = 0 in {
 def S2_asr_r_r : T_S3op_shift32<"asr", sra, 0b00>;
 def S2_lsr_r_r : T_S3op_shift32<"lsr", srl, 0b01>;
 def S2_asl_r_r : T_S3op_shift32<"asl", shl, 0b10>;
 def S2_lsl_r_r : T_S3op_shift32<"lsl", shl, 0b11>;
-}

 // Shift by register with saturation
 // Rd=asr(Rs,Rt):sat
 // Rd=asl(Rs,Rt):sat

-let Defs = [USR_OVF], isCodeGenOnly = 0 in {
+let Defs = [USR_OVF] in {
   def S2_asr_r_r_sat : T_S3op_shift32_Sat<"asr", 0b00>;
   def S2_asl_r_r_sat : T_S3op_shift32_Sat<"asl", 0b10>;
 }

+let hasNewValue = 1, hasSideEffects = 0 in
+class T_S3op_8 <string opc, bits<3> MinOp, bit isSat, bit isRnd, bit hasShift, bit hasSplat = 0>
+  : SInst < (outs IntRegs:$Rd),
+            (ins DoubleRegs:$Rss, IntRegs:$Rt),
+  "$Rd = "#opc#"($Rss, $Rt"#!if(hasSplat, "*", "")#")"
+                          #!if(hasShift, ":<<1", "")
+                          #!if(isRnd, ":rnd", "")
+                          #!if(isSat, ":sat", ""),
+  [], "", S_3op_tc_1_SLOT23 > {
+    bits<5> Rd;
+    bits<5> Rss;
+    bits<5> Rt;
+
+    let IClass = 0b1100;
+
+    let Inst{27-24} = 0b0101;
+    let Inst{20-16} = Rss;
+    let Inst{12-8} = Rt;
+    let Inst{7-5} = MinOp;
+    let Inst{4-0} = Rd;
+  }
+
+def S2_asr_r_svw_trun : T_S3op_8<"vasrw", 0b010, 0, 0, 0>;
+
+let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in
+def S2_vcrotate : T_S3op_shiftVect < "vcrotate", 0b11, 0b00>;
+
+let hasSideEffects = 0 in
+class T_S3op_7 <string mnemonic, bit MajOp >
+  : SInst <(outs DoubleRegs:$Rdd),
+           (ins DoubleRegs:$Rss, DoubleRegs:$Rtt, u3Imm:$u3),
+  "$Rdd = "#mnemonic#"($Rss, $Rtt, #$u3)" ,
+  [], "", S_3op_tc_1_SLOT23 > {
+    bits<5> Rdd;
+    bits<5> Rss;
+    bits<5> Rtt;
+    bits<3> u3;
+
+    let IClass = 0b1100;
+
+    let Inst{27-24} = 0b0000;
+    let Inst{23} = MajOp;
+    let Inst{20-16} = !if(MajOp, Rss, Rtt);
+    let Inst{12-8} = !if(MajOp, Rtt, Rss);
+    let Inst{7-5} = u3;
+    let Inst{4-0} = Rdd;
+  }
+
+def S2_valignib : T_S3op_7 < "valignb", 0>;
+def S2_vspliceib : T_S3op_7 < "vspliceb", 1>;
+
 //===----------------------------------------------------------------------===//
 // Template class for 'insert bitfield' instructions
 //===----------------------------------------------------------------------===//
@@ -4642,17 +5589,45 @@ class T_S2op_insert RegTyBits, RegisterClass RC, Operand ImmOp>

 // Rx=insert(Rs,Rtt)
 // Rx=insert(Rs,#u5,#U5)
-let hasNewValue = 1, isCodeGenOnly = 0 in {
+let hasNewValue = 1 in {
   def S2_insert_rp : T_S3op_insert <"insert", IntRegs>;
   def S2_insert : T_S2op_insert <0b1111, IntRegs, u5Imm>;
 }

 // Rxx=insert(Rss,Rtt)
 // Rxx=insert(Rss,#u6,#U6)
-let isCodeGenOnly = 0 in {
 def S2_insertp_rp : T_S3op_insert<"insert", DoubleRegs>;
 def S2_insertp : T_S2op_insert <0b0011, DoubleRegs, u6Imm>;
-}
+
+
+def SDTHexagonINSERT:
+  SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+                       SDTCisInt<0>, SDTCisVT<3, i32>, SDTCisVT<4, i32>]>;
+def SDTHexagonINSERTRP:
+  SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+                       SDTCisInt<0>, SDTCisVT<3, i64>]>;
+
+def HexagonINSERT : SDNode<"HexagonISD::INSERT", SDTHexagonINSERT>;
+def HexagonINSERTRP : SDNode<"HexagonISD::INSERTRP", SDTHexagonINSERTRP>;
+
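The patterns that follow select these nodes into S2_insert, S2_insertp and their register-pair forms. As a reference for what a bitfield insert computes, here is the conventional host-side model, assuming the first immediate is the field width and the second the offset (per the "Rx = insert(Rs, #u5, #U5)" syntax above); a sketch, not code from the backend:

#include <cstdint>

// insertBits(x, y, width, offset): replace the 'width'-bit field of x that
// starts at bit 'offset' with the low 'width' bits of y. width in [1, 32].
static uint32_t insertBits(uint32_t x, uint32_t y, unsigned width,
                           unsigned offset) {
  uint32_t mask = (width >= 32) ? ~0u : ((1u << width) - 1);
  return (x & ~(mask << offset)) | ((y & mask) << offset);
}
// Example: insertBits(0x000000FF, 0xAB, 8, 8) == 0x0000ABFF.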
+def: Pat<(HexagonINSERT I32:$Rs, I32:$Rt, u5ImmPred:$u1, u5ImmPred:$u2),
+         (S2_insert I32:$Rs, I32:$Rt, u5ImmPred:$u1, u5ImmPred:$u2)>;
+def: Pat<(HexagonINSERT I64:$Rs, I64:$Rt, u6ImmPred:$u1, u6ImmPred:$u2),
+         (S2_insertp I64:$Rs, I64:$Rt, u6ImmPred:$u1, u6ImmPred:$u2)>;
+def: Pat<(HexagonINSERTRP I32:$Rs, I32:$Rt, I64:$Ru),
+         (S2_insert_rp I32:$Rs, I32:$Rt, I64:$Ru)>;
+def: Pat<(HexagonINSERTRP I64:$Rs, I64:$Rt, I64:$Ru),
+         (S2_insertp_rp I64:$Rs, I64:$Rt, I64:$Ru)>;
+
+let AddedComplexity = 100 in
+def: Pat<(or (or (shl (HexagonINSERT (i32 (zextloadi8 (add I32:$b, 2))),
+                                      (i32 (extloadi8 (add I32:$b, 3))),
+                                      24, 8),
+                      (i32 16)),
+                 (shl (i32 (zextloadi8 (add I32:$b, 1))), (i32 8))),
+             (zextloadi8 I32:$b)),
+         (A2_swiz (L2_loadri_io I32:$b, 0))>;
+

 //===----------------------------------------------------------------------===//
 // Template class for 'extract bitfield' instructions
@@ -4710,18 +5685,39 @@ class T_S2op_extract RegTyBits,

 // Rdd=extractu(Rss,Rtt)
 // Rdd=extractu(Rss,#u6,#U6)
-let isCodeGenOnly = 0 in {
 def S2_extractup_rp : T_S3op_64 < "extractu", 0b00, 0b000, 0>;
 def S2_extractup : T_S2op_extract <"extractu", 0b0001, DoubleRegs, u6Imm>;
-}

 // Rd=extractu(Rs,Rtt)
 // Rd=extractu(Rs,#u5,#U5)
-let hasNewValue = 1, isCodeGenOnly = 0 in {
+let hasNewValue = 1 in {
   def S2_extractu_rp : T_S3op_extract<"extractu", 0b00>;
   def S2_extractu : T_S2op_extract <"extractu", 0b1101, IntRegs, u5Imm>;
 }

+def SDTHexagonEXTRACTU:
+  SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<1>,
+                       SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
+def SDTHexagonEXTRACTURP:
+  SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<1>,
+                       SDTCisVT<2, i64>]>;
+
+def HexagonEXTRACTU : SDNode<"HexagonISD::EXTRACTU", SDTHexagonEXTRACTU>;
+def HexagonEXTRACTURP : SDNode<"HexagonISD::EXTRACTURP", SDTHexagonEXTRACTURP>;
+
+def: Pat<(HexagonEXTRACTU I32:$src1, u5ImmPred:$src2, u5ImmPred:$src3),
+         (S2_extractu I32:$src1, u5ImmPred:$src2, u5ImmPred:$src3)>;
+def: Pat<(HexagonEXTRACTU I64:$src1, u6ImmPred:$src2, u6ImmPred:$src3),
+         (S2_extractup I64:$src1, u6ImmPred:$src2, u6ImmPred:$src3)>;
+def: Pat<(HexagonEXTRACTURP I32:$src1, I64:$src2),
+         (S2_extractu_rp I32:$src1, I64:$src2)>;
+def: Pat<(HexagonEXTRACTURP I64:$src1, I64:$src2),
+         (S2_extractup_rp I64:$src1, I64:$src2)>;
+
+// Change the sign of the immediate for Rd=-mpyi(Rs,#u8)
+def: Pat<(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)),
+         (M2_mpysin IntRegs:$src1, u8ImmPred:$src2)>;
+
 //===----------------------------------------------------------------------===//
 // :raw form of tableidx[bdhw] insns
 //===----------------------------------------------------------------------===//
@@ -4748,16 +5744,26 @@ class tableidxRawMinOp>
     let Inst{4-0} = Rx;
   }

-let isCodeGenOnly = 0 in {
 def S2_tableidxb : tableidxRaw<"tableidxb", 0b00>;
 def S2_tableidxh : tableidxRaw<"tableidxh", 0b01>;
 def S2_tableidxw : tableidxRaw<"tableidxw", 0b10>;
 def S2_tableidxd : tableidxRaw<"tableidxd", 0b11>;
-}

-// Change the sign of the immediate for Rd=-mpyi(Rs,#u8)
-def : Pat <(mul (i32 IntRegs:$src1), (ineg n8ImmPred:$src2)),
-           (i32 (M2_mpysin (i32 IntRegs:$src1), u8ImmPred:$src2))>;
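For symmetry with the insert model earlier, the extract-bitfield mappings above (HexagonEXTRACTU onto S2_extractu and S2_extractup) correspond to the usual unsigned field extract. Again a host-side sketch only, with the same width/offset reading assumed:

#include <cstdint>

// extractBitsU(x, width, offset): the 'width'-bit field of x starting at bit
// 'offset', zero-extended. width in [1, 32].
static uint32_t extractBitsU(uint32_t x, unsigned width, unsigned offset) {
  uint32_t mask = (width >= 32) ? ~0u : ((1u << width) - 1);
  return (x >> offset) & mask;
}
// Example: extractBitsU(0x0000ABFF, 8, 8) == 0xAB.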
+//===----------------------------------------------------------------------===//
+// Template class for 'table index' instructions which are assembler mapped
+// to their :raw format.
+//===----------------------------------------------------------------------===//
+let isPseudo = 1 in
+class tableidx_goodsyntax <string mnemonic>
+  : SInst <(outs IntRegs:$Rx),
+           (ins IntRegs:$_dst_, IntRegs:$Rs, u4Imm:$u4, u5Imm:$u5),
+           "$Rx = "#mnemonic#"($Rs, #$u4, #$u5)",
+           [], "$Rx = $_dst_" >;
+
+def S2_tableidxb_goodsyntax : tableidx_goodsyntax<"tableidxb">;
+def S2_tableidxh_goodsyntax : tableidx_goodsyntax<"tableidxh">;
+def S2_tableidxw_goodsyntax : tableidx_goodsyntax<"tableidxw">;
+def S2_tableidxd_goodsyntax : tableidx_goodsyntax<"tableidxd">;

 //===----------------------------------------------------------------------===//
 // V3 Instructions +
@@ -4788,3 +5794,21 @@ include "HexagonInstrInfoV5.td"
 //===----------------------------------------------------------------------===//
 // V5 Instructions -
 //===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// V60 Instructions +
+//===----------------------------------------------------------------------===//
+
+include "HexagonInstrInfoV60.td"
+
+//===----------------------------------------------------------------------===//
+// V60 Instructions -
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// ALU32/64/Vector +
+//===----------------------------------------------------------------------===//
+
+include "HexagonInstrInfoVector.td"
+
+include "HexagonInstrAlias.td"