X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FHexagon%2FHexagonInstrInfoV4.td;h=65612c590bfe760081f2a49fb68a9d8945993ea9;hb=72f9544cc033e9e7316691f0f210672c9639cb14;hp=2ef3c19e17843c9dd4bf0d7d4d751e4f41df2017;hpb=91568ff3aa9a29819e94fb44d7c9f591bbfdd44e;p=oota-llvm.git

diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td
index 2ef3c19e178..65612c590bf 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -11,6 +11,25 @@
 //
 //===----------------------------------------------------------------------===//
 
+def DuplexIClass0: InstDuplex < 0 >;
+def DuplexIClass1: InstDuplex < 1 >;
+def DuplexIClass2: InstDuplex < 2 >;
+let isExtendable = 1 in {
+  def DuplexIClass3: InstDuplex < 3 >;
+  def DuplexIClass4: InstDuplex < 4 >;
+  def DuplexIClass5: InstDuplex < 5 >;
+  def DuplexIClass6: InstDuplex < 6 >;
+  def DuplexIClass7: InstDuplex < 7 >;
+}
+def DuplexIClass8: InstDuplex < 8 >;
+def DuplexIClass9: InstDuplex < 9 >;
+def DuplexIClassA: InstDuplex < 0xA >;
+def DuplexIClassB: InstDuplex < 0xB >;
+def DuplexIClassC: InstDuplex < 0xC >;
+def DuplexIClassD: InstDuplex < 0xD >;
+def DuplexIClassE: InstDuplex < 0xE >;
+def DuplexIClassF: InstDuplex < 0xF >;
+
 def addrga: PatLeaf<(i32 AddrGA:$Addr)>;
 def addrgp: PatLeaf<(i32 AddrGP:$Addr)>;
 
@@ -38,19 +57,9 @@ def BITPOS32 : SDNodeXForm<imm, [{
   int32_t imm = N->getSExtValue();
-  return XformMskToBitPosU5Imm(imm);
+  return XformMskToBitPosU5Imm(imm, SDLoc(N));
 }]>;
 
-// Fold (add (CONST32 tglobaladdr:$addr) ) into a global address.
-def FoldGlobalAddr : ComplexPattern;
-
-// Fold (add (CONST32_GP tglobaladdr:$addr) ) into a global address.
-def FoldGlobalAddrGP : ComplexPattern;
-
-def NumUsesBelowThresCONST32 : PatFrag<(ops node:$addr),
-                                       (HexagonCONST32 node:$addr), [{
-  return hasNumUsesBelowThresGA(N->getOperand(0).getNode());
-}]>;
 
 // Hexagon V4 Architecture spec defines 8 instruction classes:
 // LD ST ALU32 XTYPE J JR MEMOP NV CR SYSTEM(system is not implemented in the
@@ -122,21 +131,19 @@ class T_ALU32_3op_not<string mnemonic, bits<3> MajOp, bits<3> MinOp,
   let AsmString = "$Rd = "#mnemonic#"($Rs, ~$Rt)";
 }
 
-let BaseOpcode = "andn_rr", CextOpcode = "andn", isCodeGenOnly = 0 in
+let BaseOpcode = "andn_rr", CextOpcode = "andn" in
 def A4_andn : T_ALU32_3op_not<"and", 0b001, 0b100, 1>;
-let BaseOpcode = "orn_rr", CextOpcode = "orn", isCodeGenOnly = 0 in
+let BaseOpcode = "orn_rr", CextOpcode = "orn" in
 def A4_orn : T_ALU32_3op_not<"or", 0b001, 0b101, 1>;
 
-let CextOpcode = "rcmp.eq", isCodeGenOnly = 0 in
+let CextOpcode = "rcmp.eq" in
 def A4_rcmpeq : T_ALU32_3op<"cmp.eq", 0b011, 0b010, 0, 1>;
-let CextOpcode = "!rcmp.eq", isCodeGenOnly = 0 in
+let CextOpcode = "!rcmp.eq" in
 def A4_rcmpneq : T_ALU32_3op<"!cmp.eq", 0b011, 0b011, 0, 1>;
 
-let isCodeGenOnly = 0 in {
 def C4_cmpneq  : T_ALU32_3op_cmp<"!cmp.eq",  0b00, 1, 1>;
 def C4_cmplte  : T_ALU32_3op_cmp<"!cmp.gt",  0b10, 1, 0>;
 def C4_cmplteu : T_ALU32_3op_cmp<"!cmp.gtu", 0b11, 1, 0>;
-}
 
 // Pats for instruction selection.
@@ -150,12 +157,14 @@ def: T_cmp32_rr_pat, i32>;
 def: T_cmp32_rr_pat, i32>;
 def: T_cmp32_rr_pat;
+def: T_cmp32_rr_pat;
+
+def: T_cmp32_rr_pat, i1>;
 
 class T_CMP_rrbh<string mnemonic, bits<3> MinOp, bit IsComm>
   : SInst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, IntRegs:$Rt),
   "$Pd = "#mnemonic#"($Rs, $Rt)", [], "", S_3op_tc_2early_SLOT23>, ImmRegRel {
-  let validSubTargets = HasV4SubT;
   let InputType = "reg";
   let CextOpcode = mnemonic;
   let isCompare = 1;
@@ -174,13 +183,26 @@ class T_CMP_rrbh<string mnemonic, bits<3> MinOp, bit IsComm>
   let Inst{1-0} = Pd;
 }
 
-let isCodeGenOnly = 0 in {
 def A4_cmpbeq  : T_CMP_rrbh<"cmpb.eq",  0b110, 1>;
 def A4_cmpbgt  : T_CMP_rrbh<"cmpb.gt",  0b010, 0>;
 def A4_cmpbgtu : T_CMP_rrbh<"cmpb.gtu", 0b111, 0>;
 def A4_cmpheq  : T_CMP_rrbh<"cmph.eq",  0b011, 1>;
 def A4_cmphgt  : T_CMP_rrbh<"cmph.gt",  0b100, 0>;
 def A4_cmphgtu : T_CMP_rrbh<"cmph.gtu", 0b101, 0>;
+
+let AddedComplexity = 100 in {
+  def: Pat<(i1 (seteq (and (xor (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)),
+                       255), 0)),
+           (A4_cmpbeq IntRegs:$Rs, IntRegs:$Rt)>;
+  def: Pat<(i1 (setne (and (xor (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)),
+                       255), 0)),
+           (C2_not (A4_cmpbeq IntRegs:$Rs, IntRegs:$Rt))>;
+  def: Pat<(i1 (seteq (and (xor (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)),
+                       65535), 0)),
+           (A4_cmpheq IntRegs:$Rs, IntRegs:$Rt)>;
+  def: Pat<(i1 (setne (and (xor (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)),
+                       65535), 0)),
+           (C2_not (A4_cmpheq IntRegs:$Rs, IntRegs:$Rt))>;
 }
 
 class T_CMP_ribh<string mnemonic, bits<2> MajOp, bit IsHalf, bit IsComm,
@@ -188,7 +210,6 @@ class T_CMP_ribh<string mnemonic, bits<2> MajOp, bit IsHalf, bit IsComm,
   : ALU64Inst<(outs PredRegs:$Pd), (ins IntRegs:$Rs, ImmType:$Imm),
   "$Pd = "#mnemonic#"($Rs, #$Imm)", [], "", ALU64_tc_2early_SLOT23>, ImmRegRel {
-  let validSubTargets = HasV4SubT;
   let InputType = "imm";
   let CextOpcode = mnemonic;
   let isCompare = 1;
@@ -213,19 +234,17 @@ class T_CMP_ribh<string mnemonic, bits<2> MajOp, bit IsHalf, bit IsComm,
   let Inst{1-0} = Pd;
 }
 
-let isCodeGenOnly = 0 in {
 def A4_cmpbeqi  : T_CMP_ribh<"cmpb.eq",  0b00, 0, 1, u8Imm, 0, 0, 8>;
 def A4_cmpbgti  : T_CMP_ribh<"cmpb.gt",  0b01, 0, 0, s8Imm, 0, 1, 8>;
 def A4_cmpbgtui : T_CMP_ribh<"cmpb.gtu", 0b10, 0, 0, u7Ext, 1, 0, 7>;
 def A4_cmpheqi  : T_CMP_ribh<"cmph.eq",  0b00, 1, 1, s8Ext, 1, 1, 8>;
 def A4_cmphgti  : T_CMP_ribh<"cmph.gt",  0b01, 1, 0, s8Ext, 1, 1, 8>;
 def A4_cmphgtui : T_CMP_ribh<"cmph.gtu", 0b10, 1, 0, u7Ext, 1, 0, 7>;
-}
+
 class T_RCMP_EQ_ri<string mnemonic, bit IsNeg>
   : ALU32_ri<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s8Ext:$s8),
   "$Rd = "#mnemonic#"($Rs, #$s8)", [], "", ALU32_2op_tc_1_SLOT0123>, ImmRegRel {
-  let validSubTargets = HasV4SubT;
   let InputType = "imm";
   let CextOpcode = !if (IsNeg, "!rcmp.eq", "rcmp.eq");
   let isExtendable = 1;
@@ -248,22 +267,19 @@ class T_RCMP_EQ_ri<string mnemonic, bit IsNeg>
   let Inst{4-0} = Rd;
 }
 
-let isCodeGenOnly = 0 in {
 def A4_rcmpeqi  : T_RCMP_EQ_ri<"cmp.eq", 0>;
 def A4_rcmpneqi : T_RCMP_EQ_ri<"!cmp.eq", 1>;
-}
 
-def: Pat<(i32 (zext (i1 (seteq (i32 IntRegs:$Rs), s8ExtPred:$s8)))),
-         (A4_rcmpeqi IntRegs:$Rs, s8ExtPred:$s8)>;
-def: Pat<(i32 (zext (i1 (setne (i32 IntRegs:$Rs), s8ExtPred:$s8)))),
-         (A4_rcmpneqi IntRegs:$Rs, s8ExtPred:$s8)>;
+def: Pat<(i32 (zext (i1 (seteq (i32 IntRegs:$Rs), s32ImmPred:$s8)))),
+         (A4_rcmpeqi IntRegs:$Rs, s32ImmPred:$s8)>;
+def: Pat<(i32 (zext (i1 (setne (i32 IntRegs:$Rs), s32ImmPred:$s8)))),
+         (A4_rcmpneqi IntRegs:$Rs, s32ImmPred:$s8)>;
 
 // Preserve the S2_tstbit_r generation
 def: Pat<(i32 (zext (i1 (setne (i32 (and (i32 (shl 1, (i32 IntRegs:$src2))),
          (i32 IntRegs:$src1))), 0)))),
          (C2_muxii (S2_tstbit_r IntRegs:$src1, IntRegs:$src2), 1, 0)>;
-
 //===----------------------------------------------------------------------===//
 // ALU32 -
 //===----------------------------------------------------------------------===//
@@ -291,26 +307,23 @@ class T_Combine1<bits<2> MajOp, dag ins, string AsmStr>
   let Inst{4-0} = Rdd;
 }
 
-let opExtendable = 2, isCodeGenOnly = 0 in
+let opExtendable = 2 in
 def A4_combineri : T_Combine1<0b00, (ins IntRegs:$Rs, s8Ext:$s8),
                               "$Rdd = combine($Rs, #$s8)">;
 
-let opExtendable = 1, isCodeGenOnly = 0 in
+let opExtendable = 1 in
 def A4_combineir : T_Combine1<0b01, (ins s8Ext:$s8, IntRegs:$Rs),
                               "$Rdd = combine(#$s8, $Rs)">;
 
-def HexagonWrapperCombineRI_V4 :
-  SDNode<"HexagonISD::WrapperCombineRI_V4", SDTHexagonI64I32I32>;
-def HexagonWrapperCombineIR_V4 :
-  SDNode<"HexagonISD::WrapperCombineIR_V4", SDTHexagonI64I32I32>;
+// The complexity of the combines involving immediates should be greater
+// than the complexity of the combine with two registers.
+let AddedComplexity = 50 in {
+def: Pat<(HexagonCOMBINE IntRegs:$r, s32ImmPred:$i),
+         (A4_combineri IntRegs:$r, s32ImmPred:$i)>;
 
-def : Pat <(HexagonWrapperCombineRI_V4 IntRegs:$r, s8ExtPred:$i),
-           (A4_combineri IntRegs:$r, s8ExtPred:$i)>,
-          Requires<[HasV4T]>;
-
-def : Pat <(HexagonWrapperCombineIR_V4 s8ExtPred:$i, IntRegs:$r),
-           (A4_combineir s8ExtPred:$i, IntRegs:$r)>,
-          Requires<[HasV4T]>;
+def: Pat<(HexagonCOMBINE s32ImmPred:$i, IntRegs:$r),
+         (A4_combineir s32ImmPred:$i, IntRegs:$r)>;
+}
 
 // A4_combineii: Set two small immediates.
 let hasSideEffects = 0, isExtendable = 1, opExtentBits = 6, opExtendable = 2 in
@@ -328,6 +341,12 @@ def A4_combineii: ALU32Inst<(outs DoubleRegs:$Rdd), (ins s8Imm:$s8, u6Ext:$U6),
   let Inst{4-0} = Rdd;
 }
 
+// The complexity of the combine with two immediates should be greater than
+// the complexity of a combine involving a register.
+let AddedComplexity = 75 in
+def: Pat<(HexagonCOMBINE s8ImmPred:$s8, u32ImmPred:$u6),
+         (A4_combineii imm:$s8, imm:$u6)>;
+
 //===----------------------------------------------------------------------===//
 // ALU32/PERM -
 //===----------------------------------------------------------------------===//
@@ -349,20 +368,22 @@ multiclass Loadxm_pat<PatFrag Load, ValueType VT, PatFrag ValueMod,
                       PatLeaf ImmPred, InstHexagon MI> {
   def: Pat<(VT (Load AddrFI:$fi)),
            (VT (ValueMod (MI AddrFI:$fi, 0)))>;
+  def: Pat<(VT (Load (add AddrFI:$fi, ImmPred:$Off))),
+           (VT (ValueMod (MI AddrFI:$fi, imm:$Off)))>;
   def: Pat<(VT (Load (add IntRegs:$Rs, ImmPred:$Off))),
            (VT (ValueMod (MI IntRegs:$Rs, imm:$Off)))>;
   def: Pat<(VT (Load (i32 IntRegs:$Rs))),
            (VT (ValueMod (MI IntRegs:$Rs, 0)))>;
 }
 
-defm: Loadxm_pat;
-defm: Loadxm_pat;
-defm: Loadxm_pat;
-defm: Loadxm_pat;
-defm: Loadxm_pat;
-defm: Loadxm_pat;
-defm: Loadxm_pat;
-defm: Loadxm_pat;
+defm: Loadxm_pat;
+defm: Loadxm_pat;
+defm: Loadxm_pat;
+defm: Loadxm_pat;
+defm: Loadxm_pat;
+defm: Loadxm_pat;
+defm: Loadxm_pat;
+defm: Loadxm_pat;
 
 // Map Rdd = anyext(Rs) -> Rdd = combine(#0, Rs).
def: Pat<(i64 (anyext (i32 IntRegs:$src1))), (Zext64 IntRegs:$src1)>; @@ -392,21 +413,35 @@ class T_LD_abs_setMajOp>: let Inst{6-5} = addr{1-0}; } -let accessSize = ByteAccess, hasNewValue = 1, isCodeGenOnly = 0 in { +let accessSize = ByteAccess, hasNewValue = 1 in { def L4_loadrb_ap : T_LD_abs_set <"memb", IntRegs, 0b1000>; def L4_loadrub_ap : T_LD_abs_set <"memub", IntRegs, 0b1001>; } -let accessSize = HalfWordAccess, hasNewValue = 1, isCodeGenOnly = 0 in { +let accessSize = HalfWordAccess, hasNewValue = 1 in { def L4_loadrh_ap : T_LD_abs_set <"memh", IntRegs, 0b1010>; def L4_loadruh_ap : T_LD_abs_set <"memuh", IntRegs, 0b1011>; + def L4_loadbsw2_ap : T_LD_abs_set <"membh", IntRegs, 0b0001>; + def L4_loadbzw2_ap : T_LD_abs_set <"memubh", IntRegs, 0b0011>; } -let accessSize = WordAccess, hasNewValue = 1, isCodeGenOnly = 0 in +let accessSize = WordAccess, hasNewValue = 1 in def L4_loadri_ap : T_LD_abs_set <"memw", IntRegs, 0b1100>; -let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in +let accessSize = WordAccess in { + def L4_loadbzw4_ap : T_LD_abs_set <"memubh", DoubleRegs, 0b0101>; + def L4_loadbsw4_ap : T_LD_abs_set <"membh", DoubleRegs, 0b0111>; +} + +let accessSize = DoubleWordAccess in def L4_loadrd_ap : T_LD_abs_set <"memd", DoubleRegs, 0b1110>; + +let accessSize = ByteAccess in + def L4_loadalignb_ap : T_LD_abs_set <"memb_fifo", DoubleRegs, 0b0100>; + +let accessSize = HalfWordAccess in +def L4_loadalignh_ap : T_LD_abs_set <"memh_fifo", DoubleRegs, 0b0010>; + // Load - Indirect with long offset let InputType = "imm", addrMode = BaseLongOffset, isExtended = 1, opExtentBits = 6, opExtendable = 3 in @@ -434,14 +469,14 @@ class T_LoadAbsReg ; def L4_loadrub_ur : T_LoadAbsReg<"memub", "LDriub", IntRegs, 0b1001>; def L4_loadalignb_ur : T_LoadAbsReg<"memb_fifo", "LDrib_fifo", DoubleRegs, 0b0100>; } -let accessSize = HalfWordAccess, isCodeGenOnly = 0 in { +let accessSize = HalfWordAccess in { def L4_loadrh_ur : T_LoadAbsReg<"memh", "LDrih", IntRegs, 0b1010>; def L4_loadruh_ur : T_LoadAbsReg<"memuh", "LDriuh", IntRegs, 0b1011>; def L4_loadbsw2_ur : T_LoadAbsReg<"membh", "LDribh2", IntRegs, 0b0001>; @@ -450,13 +485,13 @@ let accessSize = HalfWordAccess, isCodeGenOnly = 0 in { DoubleRegs, 0b0010>; } -let accessSize = WordAccess, isCodeGenOnly = 0 in { +let accessSize = WordAccess in { def L4_loadri_ur : T_LoadAbsReg<"memw", "LDriw", IntRegs, 0b1100>; def L4_loadbsw4_ur : T_LoadAbsReg<"membh", "LDribh4", DoubleRegs, 0b0111>; def L4_loadbzw4_ur : T_LoadAbsReg<"memubh", "LDriubh4", DoubleRegs, 0b0101>; } -let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in +let accessSize = DoubleWordAccess in def L4_loadrd_ur : T_LoadAbsReg<"memd", "LDrid", DoubleRegs, 0b1110>; @@ -464,10 +499,23 @@ multiclass T_LoadAbsReg_Pat { def : Pat <(VT (ldOp (add (shl IntRegs:$src1, u2ImmPred:$src2), (HexagonCONST32 tglobaladdr:$src3)))), (MI IntRegs:$src1, u2ImmPred:$src2, tglobaladdr:$src3)>; - def : Pat <(VT (ldOp (add IntRegs:$src1, (HexagonCONST32 tglobaladdr:$src2)))), (MI IntRegs:$src1, 0, tglobaladdr:$src2)>; + + def : Pat <(VT (ldOp (add (shl IntRegs:$src1, u2ImmPred:$src2), + (HexagonCONST32 tconstpool:$src3)))), + (MI IntRegs:$src1, u2ImmPred:$src2, tconstpool:$src3)>; + def : Pat <(VT (ldOp (add IntRegs:$src1, + (HexagonCONST32 tconstpool:$src2)))), + (MI IntRegs:$src1, 0, tconstpool:$src2)>; + + def : Pat <(VT (ldOp (add (shl IntRegs:$src1, u2ImmPred:$src2), + (HexagonCONST32 tjumptable:$src3)))), + (MI IntRegs:$src1, u2ImmPred:$src2, tjumptable:$src3)>; + def : Pat <(VT (ldOp (add IntRegs:$src1, + 
(HexagonCONST32 tjumptable:$src2)))), + (MI IntRegs:$src1, 0, tjumptable:$src2)>; } let AddedComplexity = 60 in { @@ -564,20 +612,20 @@ multiclass ld_idxd_shl ; defm loadrub : ld_idxd_shl<"memub", "LDriub", IntRegs, 0b001>; } -let hasNewValue = 1, accessSize = HalfWordAccess, isCodeGenOnly = 0 in { +let hasNewValue = 1, accessSize = HalfWordAccess in { defm loadrh : ld_idxd_shl<"memh", "LDrih", IntRegs, 0b010>; defm loadruh : ld_idxd_shl<"memuh", "LDriuh", IntRegs, 0b011>; } -let hasNewValue = 1, accessSize = WordAccess, isCodeGenOnly = 0 in +let hasNewValue = 1, accessSize = WordAccess in defm loadri : ld_idxd_shl<"memw", "LDriw", IntRegs, 0b100>; -let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in +let accessSize = DoubleWordAccess in defm loadrd : ld_idxd_shl<"memd", "LDrid", DoubleRegs, 0b110>; // 'def pats' for load instructions with base + register offset and non-zero @@ -624,30 +672,6 @@ def: Pat<(i64 (zext (i1 PredRegs:$src1))), def: Pat<(i64 (zext (i32 IntRegs:$src1))), (Zext64 IntRegs:$src1)>; -// zext i32->i64 -def: Pat <(i64 (zextloadi32 ADDRriS11_2:$src1)), - (i64 (A4_combineir 0, (L2_loadri_io AddrFI:$src1, 0)))>, - Requires<[HasV4T]>; - -let AddedComplexity = 100 in -def: Pat <(i64 (zextloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))), - (i64 (A4_combineir 0, (L2_loadri_io IntRegs:$src1, - s11_2ExtPred:$offset)))>, - Requires<[HasV4T]>; - -// anyext i32->i64 -def: Pat <(i64 (extloadi32 ADDRriS11_2:$src1)), - (i64 (A4_combineir 0, (L2_loadri_io AddrFI:$src1, 0)))>, - Requires<[HasV4T]>; - -let AddedComplexity = 100 in -def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))), - (i64 (A4_combineir 0, (L2_loadri_io IntRegs:$src1, - s11_2ExtPred:$offset)))>, - Requires<[HasV4T]>; - - - //===----------------------------------------------------------------------===// // LD - //===----------------------------------------------------------------------===// @@ -660,7 +684,7 @@ def: Pat <(i64 (extloadi32 (i32 (add IntRegs:$src1, s11_2ExtPred:$offset)))), // Template class for store instructions with Absolute set addressing mode. 
//===----------------------------------------------------------------------===// let isExtended = 1, opExtendable = 1, opExtentBits = 6, - addrMode = AbsoluteSet, isNVStorable = 1 in + addrMode = AbsoluteSet in class T_ST_absset MajOp, MemAccessSize AccessSz, bit isHalf = 0> : STInst<(outs IntRegs:$dst), @@ -672,6 +696,9 @@ class T_ST_absset MajOp, let Inst{5-0} = addr; } -let mayStore = 1, addrMode = AbsoluteSet, isCodeGenOnly = 0 in { +let mayStore = 1, addrMode = AbsoluteSet in { def S4_storerbnew_ap : T_ST_absset_nv <"memb", "STrib", 0b00, ByteAccess>; def S4_storerhnew_ap : T_ST_absset_nv <"memh", "STrih", 0b01, HalfWordAccess>; def S4_storerinew_ap : T_ST_absset_nv <"memw", "STriw", 0b10, WordAccess>; } let isExtended = 1, opExtendable = 2, opExtentBits = 6, InputType = "imm", -addrMode = BaseLongOffset, AddedComplexity = 40 in + addrMode = BaseLongOffset, AddedComplexity = 40 in class T_StoreAbsReg MajOp, MemAccessSize AccessSz, bit isHalf = 0> : STInst<(outs), @@ -742,6 +769,10 @@ class T_StoreAbsReg ; def S4_storerh_ur : T_StoreAbsReg <"memh", "STrih", IntRegs, 0b010, HalfWordAccess>; @@ -763,15 +793,14 @@ def S4_storerf_ur : T_StoreAbsReg <"memh", "STrif", IntRegs, 0b011, def S4_storeri_ur : T_StoreAbsReg <"memw", "STriw", IntRegs, 0b100, WordAccess>; def S4_storerd_ur : T_StoreAbsReg <"memd", "STrid", DoubleRegs, 0b110, DoubleWordAccess>; -} let AddedComplexity = 40 in multiclass T_StoreAbsReg_Pats { def : Pat<(stOp (VT RC:$src4), (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2), - u0AlwaysExtPred:$src3)), - (MI IntRegs:$src1, u2ImmPred:$src2, u0AlwaysExtPred:$src3, RC:$src4)>; + u32ImmPred:$src3)), + (MI IntRegs:$src1, u2ImmPred:$src2, u32ImmPred:$src3, RC:$src4)>; def : Pat<(stOp (VT RC:$src4), (add (shl IntRegs:$src1, u2ImmPred:$src2), @@ -815,11 +844,9 @@ class T_StoreAbsRegNV MajOp, let Inst{5-0} = src3; } -let isCodeGenOnly = 0 in { def S4_storerbnew_ur : T_StoreAbsRegNV <"memb", "STrib", 0b00, ByteAccess>; def S4_storerhnew_ur : T_StoreAbsRegNV <"memh", "STrih", 0b01, HalfWordAccess>; def S4_storerinew_ur : T_StoreAbsRegNV <"memw", "STriw", 0b10, WordAccess>; -} //===----------------------------------------------------------------------===// // Template classes for the non-predicated store instructions with @@ -836,6 +863,9 @@ class T_store_rr MajOp, bit isH> bits<2> u2; bits<5> Rt; + // Store upper-half and store doubleword cannot be NV. + let isNVStorable = !if (!eq(mnemonic, "memd"), 0, !if(isH,0,1)); + let IClass = 0b0011; let Inst{27-24} = 0b1011; @@ -868,6 +898,8 @@ class T_pstore_rr MajOp, let isPredicatedFalse = isNot; let isPredicatedNew = isPredNew; + // Store upper-half and store doubleword cannot be NV. 
+ let isNVStorable = !if (!eq(mnemonic, "memd"), 0, !if(isH,0,1)); let IClass = 0b0011; @@ -983,8 +1015,7 @@ multiclass ST_Idxd_shl_nv , ST_Idxd_shl_nv<"memb", "STrib", IntRegs, 0b00>; @@ -1118,8 +1149,8 @@ multiclass ST_Imm ; @@ -1130,22 +1161,49 @@ let hasSideEffects = 0, validSubTargets = HasV4SubT, addrMode = BaseImmOffset, defm S4_storeiri : ST_Imm<"memw", "STriw", u6_2Imm, 0b10>; } -let Predicates = [HasV4T], AddedComplexity = 10 in { -def: Pat<(truncstorei8 s8ExtPred:$src3, (add IntRegs:$src1, u6_0ImmPred:$src2)), - (S4_storeirb_io IntRegs:$src1, u6_0ImmPred:$src2, s8ExtPred:$src3)>; +def IMM_BYTE : SDNodeXFormgetSExtValue(); + return CurDAG->getTargetConstant(imm, SDLoc(N), MVT::i32); +}]>; -def: Pat<(truncstorei16 s8ExtPred:$src3, (add IntRegs:$src1, - u6_1ImmPred:$src2)), - (S4_storeirh_io IntRegs:$src1, u6_1ImmPred:$src2, s8ExtPred:$src3)>; +def IMM_HALF : SDNodeXFormgetSExtValue(); + return CurDAG->getTargetConstant(imm, SDLoc(N), MVT::i32); +}]>; -def: Pat<(store s8ExtPred:$src3, (add IntRegs:$src1, u6_2ImmPred:$src2)), - (S4_storeiri_io IntRegs:$src1, u6_2ImmPred:$src2, s8ExtPred:$src3)>; -} +def IMM_WORD : SDNodeXFormgetSExtValue(); + return CurDAG->getTargetConstant(imm, SDLoc(N), MVT::i32); +}]>; -let AddedComplexity = 6 in -def : Pat <(truncstorei8 s8ExtPred:$src2, (i32 IntRegs:$src1)), - (S4_storeirb_io IntRegs:$src1, 0, s8ExtPred:$src2)>, - Requires<[HasV4T]>; +def ToImmByte : OutPatFrag<(ops node:$R), (IMM_BYTE $R)>; +def ToImmHalf : OutPatFrag<(ops node:$R), (IMM_HALF $R)>; +def ToImmWord : OutPatFrag<(ops node:$R), (IMM_WORD $R)>; + +let AddedComplexity = 40 in { + // Not using frameindex patterns for these stores, because the offset + // is not extendable. This could cause problems during removing the frame + // indices, since the offset with respect to R29/R30 may not fit in the + // u6 field. + def: Storexm_add_pat; + def: Storexm_add_pat; + def: Storexm_add_pat; +} + +def: Storexm_simple_pat; +def: Storexm_simple_pat; +def: Storexm_simple_pat; // memb(Rx++#s4:0:circ(Mu))=Rt // memb(Rx++I:circ(Mu))=Rt @@ -1153,16 +1211,10 @@ def : Pat <(truncstorei8 s8ExtPred:$src2, (i32 IntRegs:$src1)), // memb(Rx++Mu:brev)=Rt // memb(gp+#u16:0)=Rt - // Store halfword. // TODO: needs to be implemented // memh(Re=#U6)=Rt.H // memh(Rs+#s11:1)=Rt.H -let AddedComplexity = 6 in -def : Pat <(truncstorei16 s8ExtPred:$src2, (i32 IntRegs:$src1)), - (S4_storeirh_io IntRegs:$src1, 0, s8ExtPred:$src2)>, - Requires<[HasV4T]>; - // memh(Rs+Ru<<#u2)=Rt.H // TODO: needs to be implemented. @@ -1179,7 +1231,6 @@ def : Pat <(truncstorei16 s8ExtPred:$src2, (i32 IntRegs:$src1)), // if ([!]Pv[.new]) memh(#u6)=Rt.H // if ([!]Pv[.new]) memh(#u6)=Rt - // if ([!]Pv[.new]) memh(Rs+#u6:1)=Rt.H // TODO: needs to be implemented. @@ -1189,12 +1240,6 @@ def : Pat <(truncstorei16 s8ExtPred:$src2, (i32 IntRegs:$src1)), // Store word. // memw(Re=#U6)=Rt // TODO: Needs to be implemented. - -let AddedComplexity = 6 in -def : Pat <(store s8ExtPred:$src2, (i32 IntRegs:$src1)), - (S4_storeiri_io IntRegs:$src1, 0, s8ExtPred:$src2)>, - Requires<[HasV4T]>; - // memw(Rx++#s4:2)=Rt // memw(Rx++#s4:2:circ(Mu))=Rt // memw(Rx++I:circ(Mu))=Rt @@ -1309,7 +1354,7 @@ multiclass ST_Idxd_nv, AddrModeRel; @@ -1327,18 +1372,42 @@ let addrMode = BaseImmOffset, InputType = "imm", isCodeGenOnly = 0 in { // Post increment loads with register offset. 
//===----------------------------------------------------------------------===// -let hasNewValue = 1, isCodeGenOnly = 0 in +let hasNewValue = 1 in def L2_loadbsw2_pr : T_load_pr <"membh", IntRegs, 0b0001, HalfWordAccess>; -let isCodeGenOnly = 0 in def L2_loadbsw4_pr : T_load_pr <"membh", DoubleRegs, 0b0111, WordAccess>; +let hasSideEffects = 0, addrMode = PostInc in +class T_loadalign_pr MajOp, MemAccessSize AccessSz> + : LDInstPI <(outs DoubleRegs:$dst, IntRegs:$_dst_), + (ins DoubleRegs:$src1, IntRegs:$src2, ModRegs:$src3), + "$dst = "#mnemonic#"($src2++$src3)", [], + "$src1 = $dst, $src2 = $_dst_"> { + bits<5> dst; + bits<5> src2; + bits<1> src3; + + let accessSize = AccessSz; + let IClass = 0b1001; + + let Inst{27-25} = 0b110; + let Inst{24-21} = MajOp; + let Inst{20-16} = src2; + let Inst{13} = src3; + let Inst{12} = 0b0; + let Inst{7} = 0b0; + let Inst{4-0} = dst; + } + +def L2_loadalignb_pr : T_loadalign_pr <"memb_fifo", 0b0100, ByteAccess>; +def L2_loadalignh_pr : T_loadalign_pr <"memh_fifo", 0b0010, HalfWordAccess>; + //===----------------------------------------------------------------------===// // Template class for non-predicated post increment .new stores // mem[bhwd](Rx++#s4:[0123])=Nt.new //===----------------------------------------------------------------------===// -let isPredicable = 1, hasSideEffects = 0, validSubTargets = HasV4SubT, - addrMode = PostInc, isNVStore = 1, isNewValue = 1, opNewValue = 3 in +let isPredicable = 1, hasSideEffects = 0, addrMode = PostInc, isNVStore = 1, + isNewValue = 1, opNewValue = 3 in class T_StorePI_nv MajOp > : NVInstPI_V4 <(outs IntRegs:$_dst_), (ins IntRegs:$src1, ImmOp:$offset, IntRegs:$src2), @@ -1370,8 +1439,8 @@ class T_StorePI_nv MajOp > // Template class for predicated post increment .new stores // if([!]Pv[.new]) mem[bhwd](Rx++#s4:[0123])=Nt.new //===----------------------------------------------------------------------===// -let isPredicated = 1, hasSideEffects = 0, validSubTargets = HasV4SubT, - addrMode = PostInc, isNVStore = 1, isNewValue = 1, opNewValue = 4 in +let isPredicated = 1, hasSideEffects = 0, addrMode = PostInc, isNVStore = 1, + isNewValue = 1, opNewValue = 4 in class T_StorePI_nv_pred MajOp, bit isPredNot, bit isPredNew > : NVInstPI_V4 <(outs IntRegs:$_dst_), @@ -1426,13 +1495,13 @@ multiclass ST_PostInc_nv; -let accessSize = HalfWordAccess, isCodeGenOnly = 0 in +let accessSize = HalfWordAccess in defm storerhnew: ST_PostInc_nv <"memh", "STrih", s4_1Imm, 0b01>; -let accessSize = WordAccess, isCodeGenOnly = 0 in +let accessSize = WordAccess in defm storerinew: ST_PostInc_nv <"memw", "STriw", s4_2Imm, 0b10>; //===----------------------------------------------------------------------===// @@ -1459,15 +1528,12 @@ class T_StorePI_RegNV MajOp, MemAccessSize AccessSz> let Inst{7} = 0b0; } -let isCodeGenOnly = 0 in { def S2_storerbnew_pr : T_StorePI_RegNV<"memb", 0b00, ByteAccess>; def S2_storerhnew_pr : T_StorePI_RegNV<"memh", 0b01, HalfWordAccess>; def S2_storerinew_pr : T_StorePI_RegNV<"memw", 0b10, WordAccess>; -} // memb(Rx++#s4:0:circ(Mu))=Nt.new // memb(Rx++I:circ(Mu))=Nt.new -// memb(Rx++Mu)=Nt.new // memb(Rx++Mu:brev)=Nt.new // memh(Rx++#s4:1:circ(Mu))=Nt.new // memh(Rx++I:circ(Mu))=Nt.new @@ -1517,7 +1583,7 @@ class NVJrr_template majOp, bit NvOpNum, let RegOp = !if(!eq(NvOpNum, 0), src2, src1); let IClass = 0b0010; - let Inst{26} = 0b0; + let Inst{27-26} = 0b00; let Inst{25-23} = majOp; let Inst{22} = isNegCond; let Inst{18-16} = Ns; @@ -1531,9 +1597,9 @@ class NVJrr_template majOp, bit NvOpNum, multiclass 
NVJrr_cond majOp, bit NvOpNum, bit isNegCond> { // Branch not taken: - def _nt_V4: NVJrr_template; + def _nt: NVJrr_template; // Branch taken: - def _t_V4: NVJrr_template; + def _t : NVJrr_template; } // NvOpNum = 0 -> First Operand is a new-value Register @@ -1542,8 +1608,8 @@ multiclass NVJrr_cond majOp, bit NvOpNum, multiclass NVJrr_base majOp, bit NvOpNum> { let BaseOpcode = BaseOp#_NVJ in { - defm _t_Jumpnv : NVJrr_cond; // True cond - defm _f_Jumpnv : NVJrr_cond; // False cond + defm _t_jumpnv : NVJrr_cond; // True cond + defm _f_jumpnv : NVJrr_cond; // False cond } } @@ -1554,13 +1620,12 @@ multiclass NVJrr_base majOp, // if ([!]cmp.gtu(Rt,Ns.new)) jump:[n]t #r9:2 let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1, - Defs = [PC], hasSideEffects = 0, validSubTargets = HasV4SubT, - isCodeGenOnly = 0 in { - defm CMPEQrr : NVJrr_base<"cmp.eq", "CMPEQ", 0b000, 0>, PredRel; - defm CMPGTrr : NVJrr_base<"cmp.gt", "CMPGT", 0b001, 0>, PredRel; - defm CMPGTUrr : NVJrr_base<"cmp.gtu", "CMPGTU", 0b010, 0>, PredRel; - defm CMPLTrr : NVJrr_base<"cmp.gt", "CMPLT", 0b011, 1>, PredRel; - defm CMPLTUrr : NVJrr_base<"cmp.gtu", "CMPLTU", 0b100, 1>, PredRel; + Defs = [PC], hasSideEffects = 0 in { + defm J4_cmpeq : NVJrr_base<"cmp.eq", "CMPEQ", 0b000, 0>, PredRel; + defm J4_cmpgt : NVJrr_base<"cmp.gt", "CMPGT", 0b001, 0>, PredRel; + defm J4_cmpgtu : NVJrr_base<"cmp.gtu", "CMPGTU", 0b010, 0>, PredRel; + defm J4_cmplt : NVJrr_base<"cmp.gt", "CMPLT", 0b011, 1>, PredRel; + defm J4_cmpltu : NVJrr_base<"cmp.gtu", "CMPLTU", 0b100, 1>, PredRel; } //===----------------------------------------------------------------------===// @@ -1598,15 +1663,15 @@ class NVJri_template majOp, bit isNegCond, multiclass NVJri_cond majOp, bit isNegCond> { // Branch not taken: - def _nt_V4: NVJri_template; + def _nt: NVJri_template; // Branch taken: - def _t_V4: NVJri_template; + def _t : NVJri_template; } multiclass NVJri_base majOp> { let BaseOpcode = BaseOp#_NVJri in { - defm _t_Jumpnv : NVJri_cond; // True Cond - defm _f_Jumpnv : NVJri_cond; // False cond + defm _t_jumpnv : NVJri_cond; // True Cond + defm _f_jumpnv : NVJri_cond; // False cond } } @@ -1615,11 +1680,10 @@ multiclass NVJri_base majOp> { // if ([!]cmp.gtu(Ns.new,#U5)) jump:[n]t #r9:2 let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator = 1, - Defs = [PC], hasSideEffects = 0, validSubTargets = HasV4SubT, - isCodeGenOnly = 0 in { - defm CMPEQri : NVJri_base<"cmp.eq", "CMPEQ", 0b000>, PredRel; - defm CMPGTri : NVJri_base<"cmp.gt", "CMPGT", 0b001>, PredRel; - defm CMPGTUri : NVJri_base<"cmp.gtu", "CMPGTU", 0b010>, PredRel; + Defs = [PC], hasSideEffects = 0 in { + defm J4_cmpeqi : NVJri_base<"cmp.eq", "CMPEQ", 0b000>, PredRel; + defm J4_cmpgti : NVJri_base<"cmp.gt", "CMPGT", 0b001>, PredRel; + defm J4_cmpgtui : NVJri_base<"cmp.gtu", "CMPGTU", 0b010>, PredRel; } //===----------------------------------------------------------------------===// @@ -1656,16 +1720,16 @@ class NVJ_ConstImm_template majOp, string ImmVal, multiclass NVJ_ConstImm_cond majOp, string ImmVal, bit isNegCond> { // Branch not taken: - def _nt_V4: NVJ_ConstImm_template; + def _nt: NVJ_ConstImm_template; // Branch taken: - def _t_V4: NVJ_ConstImm_template; + def _t : NVJ_ConstImm_template; } multiclass NVJ_ConstImm_base majOp, string ImmVal> { let BaseOpcode = BaseOp#_NVJ_ConstImm in { - defm _t_Jumpnv : NVJ_ConstImm_cond; // True - defm _f_Jumpnv : NVJ_ConstImm_cond; // False + defm _t_jumpnv : NVJ_ConstImm_cond; // True + defm _f_jumpnv : NVJ_ConstImm_cond; // False } } @@ 
-1674,14 +1738,14 @@ multiclass NVJ_ConstImm_base majOp, // if ([!]cmp.gt(Ns.new,#-1)) jump:[n]t #r9:2 let isPredicated = 1, isBranch = 1, isNewValue = 1, isTerminator=1, - Defs = [PC], hasSideEffects = 0, isCodeGenOnly = 0 in { - defm TSTBIT0 : NVJ_ConstImm_base<"tstbit", "TSTBIT", 0b011, "0">, PredRel; - defm CMPEQn1 : NVJ_ConstImm_base<"cmp.eq", "CMPEQ", 0b100, "-1">, PredRel; - defm CMPGTn1 : NVJ_ConstImm_base<"cmp.gt", "CMPGT", 0b101, "-1">, PredRel; + Defs = [PC], hasSideEffects = 0 in { + defm J4_tstbit0 : NVJ_ConstImm_base<"tstbit", "TSTBIT", 0b011, "0">, PredRel; + defm J4_cmpeqn1 : NVJ_ConstImm_base<"cmp.eq", "CMPEQ", 0b100, "-1">, PredRel; + defm J4_cmpgtn1 : NVJ_ConstImm_base<"cmp.gt", "CMPGT", 0b101, "-1">, PredRel; } // J4_hintjumpr: Hint indirect conditional jump. -let isBranch = 1, isIndirectBranch = 1, hasSideEffects = 0, isCodeGenOnly = 0 in +let isBranch = 1, isIndirectBranch = 1, hasSideEffects = 0 in def J4_hintjumpr: JRInst < (outs), (ins IntRegs:$Rs), @@ -1702,8 +1766,7 @@ def J4_hintjumpr: JRInst < // PC-relative add let hasNewValue = 1, isExtendable = 1, opExtendable = 1, - isExtentSigned = 0, opExtentBits = 6, hasSideEffects = 0, - Uses = [PC], validSubTargets = HasV4SubT, isCodeGenOnly = 0 in + isExtentSigned = 0, opExtentBits = 6, hasSideEffects = 0, Uses = [PC] in def C4_addipc : CRInst <(outs IntRegs:$Rd), (ins u6Ext:$u6), "$Rd = add(pc, #$u6)", [], "", CR_tc_2_SLOT3 > { bits<5> Rd; @@ -1741,7 +1804,6 @@ class T_LOGICAL_3OP OpBits, bit IsNeg> let Inst{1-0} = Pd; } -let isCodeGenOnly = 0 in { def C4_and_and : T_LOGICAL_3OP<"and", "and", 0b00, 0>; def C4_and_or : T_LOGICAL_3OP<"and", "or", 0b01, 0>; def C4_or_and : T_LOGICAL_3OP<"or", "and", 0b10, 0>; @@ -1750,7 +1812,69 @@ def C4_and_andn : T_LOGICAL_3OP<"and", "and", 0b00, 1>; def C4_and_orn : T_LOGICAL_3OP<"and", "or", 0b01, 1>; def C4_or_andn : T_LOGICAL_3OP<"or", "and", 0b10, 1>; def C4_or_orn : T_LOGICAL_3OP<"or", "or", 0b11, 1>; -} + +// op(Ps, op(Pt, Pu)) +class LogLog_pat + : Pat<(i1 (Op1 I1:$Ps, (Op2 I1:$Pt, I1:$Pu))), + (MI I1:$Ps, I1:$Pt, I1:$Pu)>; + +// op(Ps, op(Pt, ~Pu)) +class LogLogNot_pat + : Pat<(i1 (Op1 I1:$Ps, (Op2 I1:$Pt, (not I1:$Pu)))), + (MI I1:$Ps, I1:$Pt, I1:$Pu)>; + +def: LogLog_pat; +def: LogLog_pat; +def: LogLog_pat; +def: LogLog_pat; + +def: LogLogNot_pat; +def: LogLogNot_pat; +def: LogLogNot_pat; +def: LogLogNot_pat; + +//===----------------------------------------------------------------------===// +// PIC: Support for PIC compilations. 
The patterns and SD nodes defined +// below are needed to support code generation for PIC +//===----------------------------------------------------------------------===// + +def SDT_HexagonPICAdd + : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; +def SDT_HexagonGOTAdd + : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; + +def SDT_HexagonGOTAddInternal : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>; +def SDT_HexagonGOTAddInternalJT : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>; +def SDT_HexagonGOTAddInternalBA : SDTypeProfile<1, 1, [SDTCisVT<0, i32>]>; + +def Hexagonpic_add : SDNode<"HexagonISD::PIC_ADD", SDT_HexagonPICAdd>; +def Hexagonat_got : SDNode<"HexagonISD::AT_GOT", SDT_HexagonGOTAdd>; +def Hexagongat_pcrel : SDNode<"HexagonISD::AT_PCREL", + SDT_HexagonGOTAddInternal>; +def Hexagongat_pcrel_jt : SDNode<"HexagonISD::AT_PCREL", + SDT_HexagonGOTAddInternalJT>; +def Hexagongat_pcrel_ba : SDNode<"HexagonISD::AT_PCREL", + SDT_HexagonGOTAddInternalBA>; + +// PIC: Map from a block address computation to a PC-relative add +def: Pat<(Hexagongat_pcrel_ba tblockaddress:$src1), + (C4_addipc u32ImmPred:$src1)>; + +// PIC: Map from the computation to generate a GOT pointer to a PC-relative add +def: Pat<(Hexagonpic_add texternalsym:$src1), + (C4_addipc u32ImmPred:$src1)>; + +// PIC: Map from a jump table address computation to a PC-relative add +def: Pat<(Hexagongat_pcrel_jt tjumptable:$src1), + (C4_addipc u32ImmPred:$src1)>; + +// PIC: Map from a GOT-relative symbol reference to a load +def: Pat<(Hexagonat_got (i32 IntRegs:$src1), tglobaladdr:$src2), + (L2_loadri_io IntRegs:$src1, s30_2ImmPred:$src2)>; + +// PIC: Map from a static symbol reference to a PC-relative add +def: Pat<(Hexagongat_pcrel tglobaladdr:$src1), + (C4_addipc u32ImmPred:$src1)>; //===----------------------------------------------------------------------===// // CR - @@ -1761,12 +1885,15 @@ def C4_or_orn : T_LOGICAL_3OP<"or", "or", 0b11, 1>; //===----------------------------------------------------------------------===// // Logical with-not instructions. -let validSubTargets = HasV4SubT, isCodeGenOnly = 0 in { - def A4_andnp : T_ALU64_logical<"and", 0b001, 1, 0, 1>; - def A4_ornp : T_ALU64_logical<"or", 0b011, 1, 0, 1>; -} +def A4_andnp : T_ALU64_logical<"and", 0b001, 1, 0, 1>; +def A4_ornp : T_ALU64_logical<"or", 0b011, 1, 0, 1>; -let hasNewValue = 1, hasSideEffects = 0, isCodeGenOnly = 0 in +def: Pat<(i64 (and (i64 DoubleRegs:$Rs), (i64 (not (i64 DoubleRegs:$Rt))))), + (A4_andnp DoubleRegs:$Rs, DoubleRegs:$Rt)>; +def: Pat<(i64 (or (i64 DoubleRegs:$Rs), (i64 (not (i64 DoubleRegs:$Rt))))), + (A4_ornp DoubleRegs:$Rs, DoubleRegs:$Rt)>; + +let hasNewValue = 1, hasSideEffects = 0 in def S4_parity: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), "$Rd = parity($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; @@ -1779,15 +1906,16 @@ def S4_parity: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), let Inst{12-8} = Rt; let Inst{4-0} = Rd; } + // Add and accumulate. 
// Rd=add(Rs,add(Ru,#s6)) let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1, opExtentBits = 6, - opExtendable = 3, isCodeGenOnly = 0 in + opExtendable = 3 in def S4_addaddi : ALU64Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Ru, s6Ext:$s6), "$Rd = add($Rs, add($Ru, #$s6))" , [(set (i32 IntRegs:$Rd), (add (i32 IntRegs:$Rs), - (add (i32 IntRegs:$Ru), s6_16ExtPred:$s6)))], + (add (i32 IntRegs:$Ru), s32ImmPred:$s6)))], "", ALU64_tc_2_SLOT23> { bits<5> Rd; bits<5> Rs; @@ -1806,7 +1934,7 @@ def S4_addaddi : ALU64Inst <(outs IntRegs:$Rd), } let isExtentSigned = 1, hasSideEffects = 0, hasNewValue = 1, isExtendable = 1, - opExtentBits = 6, opExtendable = 2, isCodeGenOnly = 0 in + opExtentBits = 6, opExtendable = 2 in def S4_subaddi: ALU64Inst <(outs IntRegs:$Rd), (ins IntRegs:$Rs, s6Ext:$s6, IntRegs:$Ru), "$Rd = add($Rs, sub(#$s6, $Ru))", @@ -1826,44 +1954,64 @@ def S4_subaddi: ALU64Inst <(outs IntRegs:$Rd), let Inst{7-5} = s6{2-0}; let Inst{4-0} = Ru; } - + +// Rd=add(Rs,sub(#s6,Ru)) +def: Pat<(add (i32 IntRegs:$src1), (sub s32ImmPred:$src2, + (i32 IntRegs:$src3))), + (S4_subaddi IntRegs:$src1, s32ImmPred:$src2, IntRegs:$src3)>; + +// Rd=sub(add(Rs,#s6),Ru) +def: Pat<(sub (add (i32 IntRegs:$src1), s32ImmPred:$src2), + (i32 IntRegs:$src3)), + (S4_subaddi IntRegs:$src1, s32ImmPred:$src2, IntRegs:$src3)>; + +// Rd=add(sub(Rs,Ru),#s6) +def: Pat<(add (sub (i32 IntRegs:$src1), (i32 IntRegs:$src3)), + (s32ImmPred:$src2)), + (S4_subaddi IntRegs:$src1, s32ImmPred:$src2, IntRegs:$src3)>; + + +// Add or subtract doublewords with carry. +//TODO: +// Rdd=add(Rss,Rtt,Px):carry +//TODO: +// Rdd=sub(Rss,Rtt,Px):carry + // Extract bitfield // Rdd=extract(Rss,#u6,#U6) // Rdd=extract(Rss,Rtt) // Rd=extract(Rs,Rtt) // Rd=extract(Rs,#u5,#U5) -let isCodeGenOnly = 0 in { def S4_extractp_rp : T_S3op_64 < "extract", 0b11, 0b100, 0>; def S4_extractp : T_S2op_extract <"extract", 0b1010, DoubleRegs, u6Imm>; -} -let hasNewValue = 1, isCodeGenOnly = 0 in { +let hasNewValue = 1 in { def S4_extract_rp : T_S3op_extract<"extract", 0b01>; def S4_extract : T_S2op_extract <"extract", 0b1101, IntRegs, u5Imm>; } // Complex add/sub halfwords/words -let Defs = [USR_OVF], isCodeGenOnly = 0 in { +let Defs = [USR_OVF] in { def S4_vxaddsubh : T_S3op_64 < "vxaddsubh", 0b01, 0b100, 0, 1>; def S4_vxaddsubw : T_S3op_64 < "vxaddsubw", 0b01, 0b000, 0, 1>; def S4_vxsubaddh : T_S3op_64 < "vxsubaddh", 0b01, 0b110, 0, 1>; def S4_vxsubaddw : T_S3op_64 < "vxsubaddw", 0b01, 0b010, 0, 1>; } -let Defs = [USR_OVF], isCodeGenOnly = 0 in { +let Defs = [USR_OVF] in { def S4_vxaddsubhr : T_S3op_64 < "vxaddsubh", 0b11, 0b000, 0, 1, 1, 1>; def S4_vxsubaddhr : T_S3op_64 < "vxsubaddh", 0b11, 0b010, 0, 1, 1, 1>; } -let Itinerary = M_tc_3x_SLOT23, Defs = [USR_OVF], isCodeGenOnly = 0 in { +let Itinerary = M_tc_3x_SLOT23, Defs = [USR_OVF] in { def M4_mac_up_s1_sat: T_MType_acc_rr<"+= mpy", 0b011, 0b000, 0, [], 0, 1, 1>; def M4_nac_up_s1_sat: T_MType_acc_rr<"-= mpy", 0b011, 0b001, 0, [], 0, 1, 1>; } // Logical xor with xor accumulation. 
// Rxx^=xor(Rss,Rtt) -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def M4_xor_xacc : SInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, DoubleRegs:$Rtt), @@ -1878,15 +2026,16 @@ def M4_xor_xacc let IClass = 0b1100; - let Inst{27-23} = 0b10101; + let Inst{27-22} = 0b101010; let Inst{20-16} = Rss; let Inst{12-8} = Rtt; + let Inst{7-5} = 0b000; let Inst{4-0} = Rxx; } // Rotate and reduce bytes // Rdd=vrcrotate(Rss,Rt,#u2) -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def S4_vrcrotate : SInst <(outs DoubleRegs:$Rdd), (ins DoubleRegs:$Rss, IntRegs:$Rt, u2Imm:$u2), @@ -1910,7 +2059,7 @@ def S4_vrcrotate // Rotate and reduce bytes with accumulation // Rxx+=vrcrotate(Rss,Rt,#u2) -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def S4_vrcrotate_acc : SInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Rt, u2Imm:$u2), @@ -1931,9 +2080,8 @@ def S4_vrcrotate_acc let Inst{4-0} = Rxx; } - // Vector reduce conditional negate halfwords -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def S2_vrcnegh : SInst <(outs DoubleRegs:$Rxx), (ins DoubleRegs:$dst2, DoubleRegs:$Rss, IntRegs:$Rt), @@ -1954,29 +2102,26 @@ def S2_vrcnegh } // Split bitfield -let isCodeGenOnly = 0 in def A4_bitspliti : T_S2op_2_di <"bitsplit", 0b110, 0b100>; // Arithmetic/Convergent round -let isCodeGenOnly = 0 in def A4_cround_ri : T_S2op_2_ii <"cround", 0b111, 0b000>; -let isCodeGenOnly = 0 in def A4_round_ri : T_S2op_2_ii <"round", 0b111, 0b100>; -let Defs = [USR_OVF], isCodeGenOnly = 0 in +let Defs = [USR_OVF] in def A4_round_ri_sat : T_S2op_2_ii <"round", 0b111, 0b110, 1>; // Logical-logical words. // Compound or-and -- Rx=or(Ru,and(Rx,#s10)) let isExtentSigned = 1, hasNewValue = 1, isExtendable = 1, opExtentBits = 10, - opExtendable = 3, isCodeGenOnly = 0 in + opExtendable = 3 in def S4_or_andix: ALU64Inst<(outs IntRegs:$Rx), (ins IntRegs:$Ru, IntRegs:$_src_, s10Ext:$s10), "$Rx = or($Ru, and($_src_, #$s10))" , [(set (i32 IntRegs:$Rx), - (or (i32 IntRegs:$Ru), (and (i32 IntRegs:$_src_), s10ExtPred:$s10)))] , + (or (i32 IntRegs:$Ru), (and (i32 IntRegs:$_src_), s32ImmPred:$s10)))] , "$_src_ = $Rx", ALU64_tc_2_SLOT23> { bits<5> Rx; bits<5> Ru; @@ -1993,7 +2138,7 @@ def S4_or_andix: // Miscellaneous ALU64 instructions. 
// -let hasNewValue = 1, hasSideEffects = 0, isCodeGenOnly = 0 in +let hasNewValue = 1, hasSideEffects = 0 in def A4_modwrapu: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), "$Rd = modwrap($Rs, $Rt)", [], "", ALU64_tc_2_SLOT23> { bits<5> Rd; @@ -2008,7 +2153,7 @@ def A4_modwrapu: ALU64Inst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), let Inst{4-0} = Rd; } -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def A4_bitsplit: ALU64Inst<(outs DoubleRegs:$Rd), (ins IntRegs:$Rs, IntRegs:$Rt), "$Rd = bitsplit($Rs, $Rt)", [], "", ALU64_tc_1_SLOT23> { @@ -2024,7 +2169,54 @@ def A4_bitsplit: ALU64Inst<(outs DoubleRegs:$Rd), let Inst{4-0} = Rd; } -let isCodeGenOnly = 0 in { +let hasSideEffects = 0 in +def dep_S2_packhl: ALU64Inst<(outs DoubleRegs:$Rd), + (ins IntRegs:$Rs, IntRegs:$Rt), + "$Rd = packhl($Rs, $Rt):deprecated", [], "", ALU64_tc_1_SLOT23> { + bits<5> Rd; + bits<5> Rs; + bits<5> Rt; + + let IClass = 0b1101; + let Inst{27-24} = 0b0100; + let Inst{21} = 0b0; + let Inst{20-16} = Rs; + let Inst{12-8} = Rt; + let Inst{4-0} = Rd; +} + +let hasNewValue = 1, hasSideEffects = 0 in +def dep_A2_addsat: ALU64Inst<(outs IntRegs:$Rd), + (ins IntRegs:$Rs, IntRegs:$Rt), + "$Rd = add($Rs, $Rt):sat:deprecated", [], "", ALU64_tc_2_SLOT23> { + bits<5> Rd; + bits<5> Rs; + bits<5> Rt; + + let IClass = 0b1101; + let Inst{27-21} = 0b0101100; + let Inst{20-16} = Rs; + let Inst{12-8} = Rt; + let Inst{7} = 0b0; + let Inst{4-0} = Rd; +} + +let hasNewValue = 1, hasSideEffects = 0 in +def dep_A2_subsat: ALU64Inst<(outs IntRegs:$Rd), + (ins IntRegs:$Rs, IntRegs:$Rt), + "$Rd = sub($Rs, $Rt):sat:deprecated", [], "", ALU64_tc_2_SLOT23> { + bits<5> Rd; + bits<5> Rs; + bits<5> Rt; + + let IClass = 0b1101; + let Inst{27-21} = 0b0101100; + let Inst{20-16} = Rt; + let Inst{12-8} = Rs; + let Inst{7} = 0b1; + let Inst{4-0} = Rd; +} + // Rx[&|]=xor(Rs,Rt) def M4_or_xor : T_MType_acc_rr < "|= xor", 0b110, 0b001, 0>; def M4_and_xor : T_MType_acc_rr < "&= xor", 0b010, 0b010, 0>; @@ -2047,7 +2239,24 @@ def M4_and_and : T_MType_acc_rr < "&= and", 0b010, 0b000, 0>; def M4_xor_andn : T_MType_acc_rr < "^= and", 0b001, 0b010, 0, [], 1>; def M4_or_andn : T_MType_acc_rr < "|= and", 0b001, 0b000, 0, [], 1>; def M4_and_andn : T_MType_acc_rr < "&= and", 0b001, 0b001, 0, [], 1>; -} + +def: T_MType_acc_pat2 ; +def: T_MType_acc_pat2 ; +def: T_MType_acc_pat2 ; +def: T_MType_acc_pat2 ; +def: T_MType_acc_pat2 ; +def: T_MType_acc_pat2 ; +def: T_MType_acc_pat2 ; +def: T_MType_acc_pat2 ; + +class T_MType_acc_pat3 + : Pat <(i32 (secOp IntRegs:$src1, (firstOp IntRegs:$src2, + (not IntRegs:$src3)))), + (i32 (MI IntRegs:$src1, IntRegs:$src2, IntRegs:$src3))>; + +def: T_MType_acc_pat3 ; +def: T_MType_acc_pat3 ; +def: T_MType_acc_pat3 ; // Compound or-or and or-and let isExtentSigned = 1, InputType = "imm", hasNewValue = 1, isExtendable = 1, @@ -2057,7 +2266,7 @@ class T_CompOR MajOp, SDNode OpNode> (ins IntRegs:$src1, IntRegs:$Rs, s10Ext:$s10), "$Rx |= "#mnemonic#"($Rs, #$s10)", [(set (i32 IntRegs:$Rx), (or (i32 IntRegs:$src1), - (OpNode (i32 IntRegs:$Rs), s10ExtPred:$s10)))], + (OpNode (i32 IntRegs:$Rs), s32ImmPred:$s10)))], "$src1 = $Rx", ALU64_tc_2_SLOT23>, ImmRegRel { bits<5> Rx; bits<5> Rs; @@ -2073,10 +2282,10 @@ class T_CompOR MajOp, SDNode OpNode> let Inst{4-0} = Rx; } -let CextOpcode = "ORr_ANDr", isCodeGenOnly = 0 in +let CextOpcode = "ORr_ANDr" in def S4_or_andi : T_CompOR <"and", 0b00, and>; -let CextOpcode = "ORr_ORr", isCodeGenOnly = 0 in +let CextOpcode = "ORr_ORr" in def S4_or_ori : T_CompOR <"or", 
0b10, or>; // Modulo wrap @@ -2121,22 +2330,33 @@ def S4_or_ori : T_CompOR <"or", 0b10, or>; //===----------------------------------------------------------------------===// // Bit reverse -let isCodeGenOnly = 0 in def S2_brevp : T_S2op_3 <"brev", 0b11, 0b110>; // Bit count -let isCodeGenOnly = 0 in { def S2_ct0p : T_COUNT_LEADING_64<"ct0", 0b111, 0b010>; def S2_ct1p : T_COUNT_LEADING_64<"ct1", 0b111, 0b100>; def S4_clbpnorm : T_COUNT_LEADING_64<"normamt", 0b011, 0b000>; -} -def: Pat<(i32 (trunc (cttz (i64 DoubleRegs:$Rss)))), - (S2_ct0p (i64 DoubleRegs:$Rss))>; -def: Pat<(i32 (trunc (cttz (not (i64 DoubleRegs:$Rss))))), - (S2_ct1p (i64 DoubleRegs:$Rss))>; +// Count trailing zeros: 64-bit. +def: Pat<(i32 (trunc (cttz I64:$Rss))), (S2_ct0p I64:$Rss)>; +def: Pat<(i32 (trunc (cttz_zero_undef I64:$Rss))), (S2_ct0p I64:$Rss)>; -let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in +// Count trailing ones: 64-bit. +def: Pat<(i32 (trunc (cttz (not I64:$Rss)))), (S2_ct1p I64:$Rss)>; +def: Pat<(i32 (trunc (cttz_zero_undef (not I64:$Rss)))), (S2_ct1p I64:$Rss)>; + +// Define leading/trailing patterns that require zero-extensions to 64 bits. +def: Pat<(i64 (ctlz I64:$Rss)), (Zext64 (S2_cl0p I64:$Rss))>; +def: Pat<(i64 (ctlz_zero_undef I64:$Rss)), (Zext64 (S2_cl0p I64:$Rss))>; +def: Pat<(i64 (cttz I64:$Rss)), (Zext64 (S2_ct0p I64:$Rss))>; +def: Pat<(i64 (cttz_zero_undef I64:$Rss)), (Zext64 (S2_ct0p I64:$Rss))>; +def: Pat<(i64 (ctlz (not I64:$Rss))), (Zext64 (S2_cl1p I64:$Rss))>; +def: Pat<(i64 (ctlz_zero_undef (not I64:$Rss))), (Zext64 (S2_cl1p I64:$Rss))>; +def: Pat<(i64 (cttz (not I64:$Rss))), (Zext64 (S2_ct1p I64:$Rss))>; +def: Pat<(i64 (cttz_zero_undef (not I64:$Rss))), (Zext64 (S2_ct1p I64:$Rss))>; + + +let hasSideEffects = 0, hasNewValue = 1 in def S4_clbaddi : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s6Imm:$s6), "$Rd = add(clb($Rs), #$s6)", [], "", S_2op_tc_2_SLOT23> { bits<5> Rs; @@ -2151,7 +2371,7 @@ def S4_clbaddi : SInst<(outs IntRegs:$Rd), (ins IntRegs:$Rs, s6Imm:$s6), let Inst{4-0} = Rd; } -let hasSideEffects = 0, hasNewValue = 1, isCodeGenOnly = 0 in +let hasSideEffects = 0, hasNewValue = 1 in def S4_clbpaddi : SInst<(outs IntRegs:$Rd), (ins DoubleRegs:$Rs, s6Imm:$s6), "$Rd = add(clb($Rs), #$s6)", [], "", S_2op_tc_2_SLOT23> { bits<5> Rs; @@ -2168,10 +2388,8 @@ def S4_clbpaddi : SInst<(outs IntRegs:$Rd), (ins DoubleRegs:$Rs, s6Imm:$s6), // Bit test/set/clear -let isCodeGenOnly = 0 in { def S4_ntstbit_i : T_TEST_BIT_IMM<"!tstbit", 0b001>; def S4_ntstbit_r : T_TEST_BIT_REG<"!tstbit", 1>; -} let AddedComplexity = 20 in { // Complexity greater than cmp reg-imm. def: Pat<(i1 (seteq (and (shl 1, u5ImmPred:$u5), (i32 IntRegs:$Rs)), 0)), @@ -2191,11 +2409,9 @@ let AddedComplexity = 100 in def: Pat<(i1 (seteq (and (i32 IntRegs:$Rs), (i32 Set5ImmPred:$u5)), (i32 0))), (S4_ntstbit_i (i32 IntRegs:$Rs), (BITPOS32 Set5ImmPred:$u5))>; -let isCodeGenOnly = 0 in { def C4_nbitsset : T_TEST_BITS_REG<"!bitsset", 0b01, 1>; def C4_nbitsclr : T_TEST_BITS_REG<"!bitsclr", 0b10, 1>; def C4_nbitsclri : T_TEST_BITS_IMM<"!bitsclr", 0b10, 1>; -} // Do not increase complexity of these patterns. In the DAG, "cmp i8" may be // represented as a compare against "value & 0xFF", which is an exact match @@ -2220,14 +2436,13 @@ def: Pat<(i1 (setne (and I32:$Rs, I32:$Rt), I32:$Rt)), // Rd=add(#u6,mpyi(Rs,#U6)) -- Multiply by immed and add immed. 
-let hasNewValue = 1, isExtendable = 1, opExtentBits = 6, opExtendable = 1, - isCodeGenOnly = 0 in +let hasNewValue = 1, isExtendable = 1, opExtentBits = 6, opExtendable = 1 in def M4_mpyri_addi : MInst<(outs IntRegs:$Rd), (ins u6Ext:$u6, IntRegs:$Rs, u6Imm:$U6), "$Rd = add(#$u6, mpyi($Rs, #$U6))" , [(set (i32 IntRegs:$Rd), (add (mul (i32 IntRegs:$Rs), u6ImmPred:$U6), - u6ExtPred:$u6))] ,"",ALU64_tc_3x_SLOT23> { + u32ImmPred:$u6))] ,"",ALU64_tc_3x_SLOT23> { bits<5> Rd; bits<6> u6; bits<5> Rs; @@ -2247,12 +2462,12 @@ def M4_mpyri_addi : MInst<(outs IntRegs:$Rd), // Rd=add(#u6,mpyi(Rs,Rt)) let CextOpcode = "ADD_MPY", InputType = "imm", hasNewValue = 1, - isExtendable = 1, opExtentBits = 6, opExtendable = 1, isCodeGenOnly = 0 in + isExtendable = 1, opExtentBits = 6, opExtendable = 1 in def M4_mpyrr_addi : MInst <(outs IntRegs:$Rd), (ins u6Ext:$u6, IntRegs:$Rs, IntRegs:$Rt), "$Rd = add(#$u6, mpyi($Rs, $Rt))" , [(set (i32 IntRegs:$Rd), - (add (mul (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)), u6ExtPred:$u6))], + (add (mul (i32 IntRegs:$Rs), (i32 IntRegs:$Rt)), u32ImmPred:$u6))], "", ALU64_tc_3x_SLOT23>, ImmRegRel { bits<5> Rd; bits<6> u6; @@ -2297,18 +2512,16 @@ class T_AddMpy let Inst{4-0} = src1; } -let isCodeGenOnly = 0 in def M4_mpyri_addr_u2 : T_AddMpy<0b0, u6_2ImmPred, (ins IntRegs:$src1, u6_2Imm:$src2, IntRegs:$src3)>; let isExtendable = 1, opExtentBits = 6, opExtendable = 3, - CextOpcode = "ADD_MPY", InputType = "imm", isCodeGenOnly = 0 in -def M4_mpyri_addr : T_AddMpy<0b1, u6ExtPred, + CextOpcode = "ADD_MPY", InputType = "imm" in +def M4_mpyri_addr : T_AddMpy<0b1, u32ImmPred, (ins IntRegs:$src1, IntRegs:$src3, u6Ext:$src2)>, ImmRegRel; // Rx=add(Ru,mpyi(Rx,Rs)) -let validSubTargets = HasV4SubT, CextOpcode = "ADD_MPY", InputType = "reg", - hasNewValue = 1, isCodeGenOnly = 0 in +let CextOpcode = "ADD_MPY", InputType = "reg", hasNewValue = 1 in def M4_mpyrr_addr: MInst_acc <(outs IntRegs:$Rx), (ins IntRegs:$Ru, IntRegs:$_src_, IntRegs:$Rs), "$Rx = add($Ru, mpyi($_src_, $Rs))", @@ -2327,78 +2540,51 @@ def M4_mpyrr_addr: MInst_acc <(outs IntRegs:$Rx), let Inst{20-16} = Rs; } -// Rd=add(##,mpyi(Rs,#U6)) -def : Pat <(add (mul (i32 IntRegs:$src2), u6ImmPred:$src3), - (HexagonCONST32 tglobaladdr:$src1)), - (i32 (M4_mpyri_addi tglobaladdr:$src1, IntRegs:$src2, - u6ImmPred:$src3))>; - -// Rd=add(##,mpyi(Rs,Rt)) -def : Pat <(add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)), - (HexagonCONST32 tglobaladdr:$src1)), - (i32 (M4_mpyrr_addi tglobaladdr:$src1, IntRegs:$src2, - IntRegs:$src3))>; // Vector reduce multiply word by signed half (32x16) //Rdd=vrmpyweh(Rss,Rtt)[:<<1] -let isCodeGenOnly = 0 in { def M4_vrmpyeh_s0 : T_M2_vmpy<"vrmpyweh", 0b010, 0b100, 0, 0, 0>; def M4_vrmpyeh_s1 : T_M2_vmpy<"vrmpyweh", 0b110, 0b100, 1, 0, 0>; -} //Rdd=vrmpywoh(Rss,Rtt)[:<<1] -let isCodeGenOnly = 0 in { def M4_vrmpyoh_s0 : T_M2_vmpy<"vrmpywoh", 0b001, 0b010, 0, 0, 0>; def M4_vrmpyoh_s1 : T_M2_vmpy<"vrmpywoh", 0b101, 0b010, 1, 0, 0>; -} + //Rdd+=vrmpyweh(Rss,Rtt)[:<<1] -let isCodeGenOnly = 0 in { def M4_vrmpyeh_acc_s0: T_M2_vmpy_acc<"vrmpyweh", 0b001, 0b110, 0, 0>; def M4_vrmpyeh_acc_s1: T_M2_vmpy_acc<"vrmpyweh", 0b101, 0b110, 1, 0>; -} //Rdd=vrmpywoh(Rss,Rtt)[:<<1] -let isCodeGenOnly = 0 in { def M4_vrmpyoh_acc_s0: T_M2_vmpy_acc<"vrmpywoh", 0b011, 0b110, 0, 0>; def M4_vrmpyoh_acc_s1: T_M2_vmpy_acc<"vrmpywoh", 0b111, 0b110, 1, 0>; -} // Vector multiply halfwords, signed by unsigned // Rdd=vmpyhsu(Rs,Rt)[:<<]:sat -let isCodeGenOnly = 0 in { def M2_vmpy2su_s0 : T_XTYPE_mpy64 < "vmpyhsu", 0b000, 0b111, 1, 0, 0>; def 
M2_vmpy2su_s1 : T_XTYPE_mpy64 < "vmpyhsu", 0b100, 0b111, 1, 1, 0>; -} // Rxx+=vmpyhsu(Rs,Rt)[:<<1]:sat -let isCodeGenOnly = 0 in { def M2_vmac2su_s0 : T_XTYPE_mpy64_acc < "vmpyhsu", "+", 0b011, 0b101, 1, 0, 0>; def M2_vmac2su_s1 : T_XTYPE_mpy64_acc < "vmpyhsu", "+", 0b111, 0b101, 1, 1, 0>; -} // Vector polynomial multiply halfwords // Rdd=vpmpyh(Rs,Rt) -let isCodeGenOnly = 0 in def M4_vpmpyh : T_XTYPE_mpy64 < "vpmpyh", 0b110, 0b111, 0, 0, 0>; // Rxx^=vpmpyh(Rs,Rt) -let isCodeGenOnly = 0 in def M4_vpmpyh_acc : T_XTYPE_mpy64_acc < "vpmpyh", "^", 0b101, 0b111, 0, 0, 0>; // Polynomial multiply words // Rdd=pmpyw(Rs,Rt) -let isCodeGenOnly = 0 in def M4_pmpyw : T_XTYPE_mpy64 < "pmpyw", 0b010, 0b111, 0, 0, 0>; // Rxx^=pmpyw(Rs,Rt) -let isCodeGenOnly = 0 in def M4_pmpyw_acc : T_XTYPE_mpy64_acc < "pmpyw", "^", 0b001, 0b111, 0, 0, 0>; //===----------------------------------------------------------------------===// // XTYPE/MPY - //===----------------------------------------------------------------------===// - //===----------------------------------------------------------------------===// // ALU64/Vector compare //===----------------------------------------------------------------------===// @@ -2430,33 +2616,25 @@ class T_vcmpImm cmpOp, bits<2> minOp, Operand ImmOprnd> } // Vector compare bytes -let isCodeGenOnly = 0 in def A4_vcmpbgt : T_vcmp <"vcmpb.gt", 0b1010>; def: T_vcmp_pat; let AsmString = "$Pd = any8(vcmpb.eq($Rss, $Rtt))" in -let isCodeGenOnly = 0 in def A4_vcmpbeq_any : T_vcmp <"any8(vcmpb.gt", 0b1000>; -let isCodeGenOnly = 0 in { def A4_vcmpbeqi : T_vcmpImm <"vcmpb.eq", 0b00, 0b00, u8Imm>; def A4_vcmpbgti : T_vcmpImm <"vcmpb.gt", 0b01, 0b00, s8Imm>; def A4_vcmpbgtui : T_vcmpImm <"vcmpb.gtu", 0b10, 0b00, u7Imm>; -} // Vector compare halfwords -let isCodeGenOnly = 0 in { def A4_vcmpheqi : T_vcmpImm <"vcmph.eq", 0b00, 0b01, s8Imm>; def A4_vcmphgti : T_vcmpImm <"vcmph.gt", 0b01, 0b01, s8Imm>; def A4_vcmphgtui : T_vcmpImm <"vcmph.gtu", 0b10, 0b01, u7Imm>; -} // Vector compare words -let isCodeGenOnly = 0 in { def A4_vcmpweqi : T_vcmpImm <"vcmpw.eq", 0b00, 0b10, s8Imm>; def A4_vcmpwgti : T_vcmpImm <"vcmpw.gt", 0b01, 0b10, s8Imm>; def A4_vcmpwgtui : T_vcmpImm <"vcmpw.gtu", 0b10, 0b10, u7Imm>; -} //===----------------------------------------------------------------------===// // XTYPE/SHIFT + @@ -2467,13 +2645,13 @@ def A4_vcmpwgtui : T_vcmpImm <"vcmpw.gtu", 0b10, 0b10, u7Imm>; // Rx=and(#u8,asl(Rx,#U5)) Rx=and(#u8,lsr(Rx,#U5)) // Rx=or(#u8,asl(Rx,#U5)) Rx=or(#u8,lsr(Rx,#U5)) let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, opExtentBits = 8, - hasNewValue = 1, opNewValue = 0, validSubTargets = HasV4SubT in + hasNewValue = 1, opNewValue = 0 in class T_S4_ShiftOperate MajOp, InstrItinClass Itin> : MInst_acc<(outs IntRegs:$Rd), (ins u8Ext:$u8, IntRegs:$Rx, u5Imm:$U5), "$Rd = "#MnOp#"(#$u8, "#MnSh#"($Rx, #$U5))", [(set (i32 IntRegs:$Rd), - (Op (Sh I32:$Rx, u5ImmPred:$U5), u8ExtPred:$u8))], + (Op (Sh I32:$Rx, u5ImmPred:$U5), u32ImmPred:$u8))], "$Rd = $Rx", Itin> { bits<5> Rd; @@ -2499,35 +2677,44 @@ multiclass T_ShiftOperate MajOp, def _lsr_ri : T_S4_ShiftOperate; } -let AddedComplexity = 200, isCodeGenOnly = 0 in { +let AddedComplexity = 200 in { defm S4_addi : T_ShiftOperate<"add", add, 0b10, ALU64_tc_2_SLOT23>; defm S4_andi : T_ShiftOperate<"and", and, 0b00, ALU64_tc_2_SLOT23>; } -let AddedComplexity = 30, isCodeGenOnly = 0 in +let AddedComplexity = 30 in defm S4_ori : T_ShiftOperate<"or", or, 0b01, ALU64_tc_1_SLOT23>; -let isCodeGenOnly = 0 in defm S4_subi : T_ShiftOperate<"sub", 
sub, 0b11, ALU64_tc_1_SLOT23>; +let AddedComplexity = 200 in { + def: Pat<(add addrga:$addr, (shl I32:$src2, u5ImmPred:$src3)), + (S4_addi_asl_ri addrga:$addr, IntRegs:$src2, u5ImmPred:$src3)>; + def: Pat<(add addrga:$addr, (srl I32:$src2, u5ImmPred:$src3)), + (S4_addi_lsr_ri addrga:$addr, IntRegs:$src2, u5ImmPred:$src3)>; + def: Pat<(sub addrga:$addr, (shl I32:$src2, u5ImmPred:$src3)), + (S4_subi_asl_ri addrga:$addr, IntRegs:$src2, u5ImmPred:$src3)>; + def: Pat<(sub addrga:$addr, (srl I32:$src2, u5ImmPred:$src3)), + (S4_subi_lsr_ri addrga:$addr, IntRegs:$src2, u5ImmPred:$src3)>; +} + // Vector conditional negate // Rdd=vcnegh(Rss,Rt) -let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23, isCodeGenOnly = 0 in +let Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in def S2_vcnegh : T_S3op_shiftVect < "vcnegh", 0b11, 0b01>; // Rd=[cround|round](Rs,Rt) -let hasNewValue = 1, Itinerary = S_3op_tc_2_SLOT23, isCodeGenOnly = 0 in { +let hasNewValue = 1, Itinerary = S_3op_tc_2_SLOT23 in { def A4_cround_rr : T_S3op_3 < "cround", IntRegs, 0b11, 0b00>; def A4_round_rr : T_S3op_3 < "round", IntRegs, 0b11, 0b10>; } // Rd=round(Rs,Rt):sat -let hasNewValue = 1, Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23, - isCodeGenOnly = 0 in +let hasNewValue = 1, Defs = [USR_OVF], Itinerary = S_3op_tc_2_SLOT23 in def A4_round_rr_sat : T_S3op_3 < "round", IntRegs, 0b11, 0b11, 1>; // Rd=[cmpyiwh|cmpyrwh](Rss,Rt):<<1:rnd:sat -let Defs = [USR_OVF], Itinerary = S_3op_tc_3x_SLOT23, isCodeGenOnly = 0 in { +let Defs = [USR_OVF], Itinerary = S_3op_tc_3x_SLOT23 in { def M4_cmpyi_wh : T_S3op_8<"cmpyiwh", 0b100, 1, 1, 1>; def M4_cmpyr_wh : T_S3op_8<"cmpyrwh", 0b110, 1, 1, 1>; } @@ -2554,10 +2741,8 @@ class T_S3op_carry MajOp> let Inst{4-0} = Rdd; } -let isCodeGenOnly = 0 in { def A4_addp_c : T_S3op_carry < "add", 0b110 >; def A4_subp_c : T_S3op_carry < "sub", 0b111 >; -} let Itinerary = S_3op_tc_3_SLOT23, hasSideEffects = 0 in class T_S3op_6 MinOp, bit isUnsigned> @@ -2581,32 +2766,26 @@ class T_S3op_6 MinOp, bit isUnsigned> // Vector reduce maximum halfwords // Rxx=vrmax[u]h(Rss,Ru) -let isCodeGenOnly = 0 in { def A4_vrmaxh : T_S3op_6 < "vrmaxh", 0b001, 0>; def A4_vrmaxuh : T_S3op_6 < "vrmaxuh", 0b001, 1>; -} + // Vector reduce maximum words // Rxx=vrmax[u]w(Rss,Ru) -let isCodeGenOnly = 0 in { def A4_vrmaxw : T_S3op_6 < "vrmaxw", 0b010, 0>; def A4_vrmaxuw : T_S3op_6 < "vrmaxuw", 0b010, 1>; -} + // Vector reduce minimum halfwords // Rxx=vrmin[u]h(Rss,Ru) -let isCodeGenOnly = 0 in { def A4_vrminh : T_S3op_6 < "vrminh", 0b101, 0>; def A4_vrminuh : T_S3op_6 < "vrminuh", 0b101, 1>; -} // Vector reduce minimum words // Rxx=vrmin[u]w(Rss,Ru) -let isCodeGenOnly = 0 in { def A4_vrminw : T_S3op_6 < "vrminw", 0b110, 0>; def A4_vrminuw : T_S3op_6 < "vrminuw", 0b110, 1>; -} // Shift an immediate left by register amount. 
-let hasNewValue = 1, hasSideEffects = 0, isCodeGenOnly = 0 in +let hasNewValue = 1, hasSideEffects = 0 in def S4_lsli: SInst <(outs IntRegs:$Rd), (ins s6Imm:$s6, IntRegs:$Rt), "$Rd = lsl(#$s6, $Rt)" , [(set (i32 IntRegs:$Rd), (shl s6ImmPred:$s6, @@ -2638,7 +2817,7 @@ def MEMOPIMM : SDNodeXFormgetSExtValue(); - return XformM5ToU5Imm(imm); + return XformM5ToU5Imm(imm, SDLoc(N)); }]>; def MEMOPIMM_HALF : SDNodeXFormgetSExtValue(); - return XformM5ToU5Imm(imm); + return XformM5ToU5Imm(imm, SDLoc(N)); }]>; def MEMOPIMM_BYTE : SDNodeXFormgetSExtValue(); - return XformM5ToU5Imm(imm); + return XformM5ToU5Imm(imm, SDLoc(N)); }]>; def SETMEMIMM : SDNodeXFormgetSExtValue(); - return XformMskToBitPosU5Imm(imm); + return XformMskToBitPosU5Imm(imm, SDLoc(N)); }]>; def CLRMEMIMM : SDNodeXFormgetSExtValue()); - return XformMskToBitPosU5Imm(imm); + return XformMskToBitPosU5Imm(imm, SDLoc(N)); }]>; def SETMEMIMM_SHORT : SDNodeXFormgetSExtValue(); - return XformMskToBitPosU4Imm(imm); + return XformMskToBitPosU4Imm(imm, SDLoc(N)); }]>; def CLRMEMIMM_SHORT : SDNodeXFormgetSExtValue()); - return XformMskToBitPosU4Imm(imm); + return XformMskToBitPosU4Imm(imm, SDLoc(N)); }]>; def SETMEMIMM_BYTE : SDNodeXFormgetSExtValue(); - return XformMskToBitPosU3Imm(imm); + return XformMskToBitPosU3Imm(imm, SDLoc(N)); }]>; def CLRMEMIMM_BYTE : SDNodeXFormgetSExtValue()); - return XformMskToBitPosU3Imm(imm); + return XformMskToBitPosU3Imm(imm, SDLoc(N)); }]>; //===----------------------------------------------------------------------===// @@ -2789,15 +2968,14 @@ multiclass MemOp_base opcBits, Operand ImmOp> { } // Define MemOp instructions. -let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, - validSubTargets =HasV4SubT in { - let opExtentBits = 6, accessSize = ByteAccess, isCodeGenOnly = 0 in +let isExtendable = 1, opExtendable = 1, isExtentSigned = 0 in { + let opExtentBits = 6, accessSize = ByteAccess in defm memopb_io : MemOp_base <"memb", 0b00, u6_0Ext>; - let opExtentBits = 7, accessSize = HalfWordAccess, isCodeGenOnly = 0 in + let opExtentBits = 7, accessSize = HalfWordAccess in defm memoph_io : MemOp_base <"memh", 0b01, u6_1Ext>; - let opExtentBits = 8, accessSize = WordAccess, isCodeGenOnly = 0 in + let opExtentBits = 8, accessSize = WordAccess in defm memopw_io : MemOp_base <"memw", 0b10, u6_2Ext>; } @@ -2808,43 +2986,43 @@ let isExtendable = 1, opExtendable = 1, isExtentSigned = 0, // mem[bh](Rs+#u6) += #U5 //===----------------------------------------------------------------------===// -multiclass MemOpi_u5Pats { let AddedComplexity = 180 in - def : Pat < (stOp (OpNode (ldOp IntRegs:$addr), u5ImmPred:$addend), - IntRegs:$addr), - (MI IntRegs:$addr, #0, u5ImmPred:$addend )>; + def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), u5ImmPred:$addend), + IntRegs:$addr), + (MI IntRegs:$addr, 0, u5ImmPred:$addend)>; let AddedComplexity = 190 in - def : Pat <(stOp (OpNode (ldOp (add IntRegs:$base, ExtPred:$offset)), - u5ImmPred:$addend), - (add IntRegs:$base, ExtPred:$offset)), - (MI IntRegs:$base, ExtPred:$offset, u5ImmPred:$addend)>; + def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, ImmPred:$offset)), + u5ImmPred:$addend), + (add IntRegs:$base, ImmPred:$offset)), + (MI IntRegs:$base, ImmPred:$offset, u5ImmPred:$addend)>; } -multiclass MemOpi_u5ALUOp { - defm : MemOpi_u5Pats; - defm : MemOpi_u5Pats; + defm: MemOpi_u5Pats; + defm: MemOpi_u5Pats; } multiclass MemOpi_u5ExtType { // Half Word - defm : MemOpi_u5ALUOp ; + defm: MemOpi_u5ALUOp ; // Byte - defm : MemOpi_u5ALUOp ; + defm: MemOpi_u5ALUOp ; } -let Predicates = 
[HasV4T, UseMEMOP] in { - defm : MemOpi_u5ExtType; // zero extend - defm : MemOpi_u5ExtType; // sign extend - defm : MemOpi_u5ExtType; // any extend +let Predicates = [UseMEMOP] in { + defm: MemOpi_u5ExtType; // zero extend + defm: MemOpi_u5ExtType; // sign extend + defm: MemOpi_u5ExtType; // any extend // Word - defm : MemOpi_u5ALUOp ; + defm: MemOpi_u5ALUOp ; } //===----------------------------------------------------------------------===// @@ -2854,38 +3032,37 @@ let Predicates = [HasV4T, UseMEMOP] in { // mem[bh](Rs+#u6) += #m5 //===----------------------------------------------------------------------===// -multiclass MemOpi_m5Pats { +multiclass MemOpi_m5Pats { let AddedComplexity = 190 in - def : Pat <(stOp (add (ldOp IntRegs:$addr), immPred:$subend), - IntRegs:$addr), - (MI IntRegs:$addr, #0, (xformFunc immPred:$subend) )>; + def: Pat<(stOp (add (ldOp IntRegs:$addr), immPred:$subend), IntRegs:$addr), + (MI IntRegs:$addr, 0, (xformFunc immPred:$subend))>; let AddedComplexity = 195 in - def : Pat<(stOp (add (ldOp (add IntRegs:$base, extPred:$offset)), - immPred:$subend), - (add IntRegs:$base, extPred:$offset)), - (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$subend))>; + def: Pat<(stOp (add (ldOp (add IntRegs:$base, ImmPred:$offset)), + immPred:$subend), + (add IntRegs:$base, ImmPred:$offset)), + (MI IntRegs:$base, ImmPred:$offset, (xformFunc immPred:$subend))>; } multiclass MemOpi_m5ExtType { // Half Word - defm : MemOpi_m5Pats ; + defm: MemOpi_m5Pats ; // Byte - defm : MemOpi_m5Pats ; + defm: MemOpi_m5Pats ; } -let Predicates = [HasV4T, UseMEMOP] in { - defm : MemOpi_m5ExtType; // zero extend - defm : MemOpi_m5ExtType; // sign extend - defm : MemOpi_m5ExtType; // any extend +let Predicates = [UseMEMOP] in { + defm: MemOpi_m5ExtType; // zero extend + defm: MemOpi_m5ExtType; // sign extend + defm: MemOpi_m5ExtType; // any extend // Word - defm : MemOpi_m5Pats ; + defm: MemOpi_m5Pats ; } //===----------------------------------------------------------------------===// @@ -2895,52 +3072,50 @@ let Predicates = [HasV4T, UseMEMOP] in { //===----------------------------------------------------------------------===// multiclass MemOpi_bitPats { + PatLeaf extPred, SDNodeXForm xformFunc, InstHexagon MI, + SDNode OpNode> { // mem[bhw](Rs+#u6:[012]) = [clrbit|setbit](#U5) let AddedComplexity = 250 in - def : Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)), - immPred:$bitend), - (add IntRegs:$base, extPred:$offset)), - (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$bitend))>; + def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)), + immPred:$bitend), + (add IntRegs:$base, extPred:$offset)), + (MI IntRegs:$base, extPred:$offset, (xformFunc immPred:$bitend))>; // mem[bhw](Rs+#0) = [clrbit|setbit](#U5) let AddedComplexity = 225 in - def : Pat <(stOp (OpNode (ldOp (addrPred IntRegs:$addr, extPred:$offset)), - immPred:$bitend), - (addrPred (i32 IntRegs:$addr), extPred:$offset)), - (MI IntRegs:$addr, extPred:$offset, (xformFunc immPred:$bitend))>; + def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), immPred:$bitend), IntRegs:$addr), + (MI IntRegs:$addr, 0, (xformFunc immPred:$bitend))>; } -multiclass MemOpi_bitExtType { +multiclass MemOpi_bitExtType { // Byte - clrbit - defm : MemOpi_bitPats; + defm: MemOpi_bitPats; // Byte - setbit - defm : MemOpi_bitPats; + defm: MemOpi_bitPats; // Half Word - clrbit - defm : MemOpi_bitPats; + defm: MemOpi_bitPats; // Half Word - setbit - defm : MemOpi_bitPats; + defm: MemOpi_bitPats; } -let Predicates = [HasV4T, UseMEMOP] in { 
+let Predicates = [UseMEMOP] in { // mem[bh](Rs+#0) = [clrbit|setbit](#U5) // mem[bh](Rs+#u6:[01]) = [clrbit|setbit](#U5) - defm : MemOpi_bitExtType; // zero extend - defm : MemOpi_bitExtType; // sign extend - defm : MemOpi_bitExtType; // any extend + defm: MemOpi_bitExtType; // zero extend + defm: MemOpi_bitExtType; // sign extend + defm: MemOpi_bitExtType; // any extend // memw(Rs+#0) = [clrbit|setbit](#U5) // memw(Rs+#u6:2) = [clrbit|setbit](#U5) - defm : MemOpi_bitPats; - defm : MemOpi_bitPats; + defm: MemOpi_bitPats; + defm: MemOpi_bitPats; } //===----------------------------------------------------------------------===// @@ -2950,54 +3125,51 @@ let Predicates = [HasV4T, UseMEMOP] in { // mem[bhw](Rs+#U6:[012]) [+-&|]= Rt //===----------------------------------------------------------------------===// -multiclass MemOpr_Pats { +multiclass MemOpr_Pats { let AddedComplexity = 141 in // mem[bhw](Rs+#0) [+-&|]= Rt - def : Pat <(stOp (OpNode (ldOp (addrPred IntRegs:$addr, extPred:$offset)), - (i32 IntRegs:$addend)), - (addrPred (i32 IntRegs:$addr), extPred:$offset)), - (MI IntRegs:$addr, extPred:$offset, (i32 IntRegs:$addend) )>; + def: Pat<(stOp (OpNode (ldOp IntRegs:$addr), (i32 IntRegs:$addend)), + IntRegs:$addr), + (MI IntRegs:$addr, 0, (i32 IntRegs:$addend))>; // mem[bhw](Rs+#U6:[012]) [+-&|]= Rt let AddedComplexity = 150 in - def : Pat <(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)), - (i32 IntRegs:$orend)), - (add IntRegs:$base, extPred:$offset)), - (MI IntRegs:$base, extPred:$offset, (i32 IntRegs:$orend) )>; + def: Pat<(stOp (OpNode (ldOp (add IntRegs:$base, extPred:$offset)), + (i32 IntRegs:$orend)), + (add IntRegs:$base, extPred:$offset)), + (MI IntRegs:$base, extPred:$offset, (i32 IntRegs:$orend))>; } -multiclass MemOPr_ALUOp { - - defm : MemOpr_Pats ; - defm : MemOpr_Pats ; - defm : MemOpr_Pats ; - defm : MemOpr_Pats ; + InstHexagon andMI, InstHexagon orMI> { + defm: MemOpr_Pats ; + defm: MemOpr_Pats ; + defm: MemOpr_Pats ; + defm: MemOpr_Pats ; } multiclass MemOPr_ExtType { // Half Word - defm : MemOPr_ALUOp ; + defm: MemOPr_ALUOp ; // Byte - defm : MemOPr_ALUOp ; + defm: MemOPr_ALUOp ; } // Define 'def Pats' for MemOps with register addend. -let Predicates = [HasV4T, UseMEMOP] in { +let Predicates = [UseMEMOP] in { // Byte, Half Word - defm : MemOPr_ExtType; // zero extend - defm : MemOPr_ExtType; // sign extend - defm : MemOPr_ExtType; // any extend + defm: MemOPr_ExtType; // zero extend + defm: MemOPr_ExtType; // sign extend + defm: MemOPr_ExtType; // any extend // Word - defm : MemOPr_ALUOp ; + defm: MemOPr_ALUOp ; } //===----------------------------------------------------------------------===// @@ -3016,217 +3188,40 @@ let Predicates = [HasV4T, UseMEMOP] in { // Pd=cmpb.eq(Rs,#u8) // p=!cmp.eq(r1,#s10) -let isCodeGenOnly = 0 in { def C4_cmpneqi : T_CMP <"cmp.eq", 0b00, 1, s10Ext>; def C4_cmpltei : T_CMP <"cmp.gt", 0b01, 1, s10Ext>; def C4_cmplteui : T_CMP <"cmp.gtu", 0b10, 1, u9Ext>; -} -def : T_CMP_pat ; -def : T_CMP_pat ; +def : T_CMP_pat ; +def : T_CMP_pat ; def : T_CMP_pat ; // rs <= rt -> !(rs > rt). /* -def: Pat<(i1 (setle (i32 IntRegs:$src1), s10ExtPred:$src2)), - (C2_not (C2_cmpgti IntRegs:$src1, s10ExtPred:$src2))>; -// (C4_cmpltei IntRegs:$src1, s10ExtPred:$src2)>; +def: Pat<(i1 (setle (i32 IntRegs:$src1), s32ImmPred:$src2)), + (C2_not (C2_cmpgti IntRegs:$src1, s32ImmPred:$src2))>; +// (C4_cmpltei IntRegs:$src1, s32ImmPred:$src2)>; */ // Map cmplt(Rs, Imm) -> !cmpgt(Rs, Imm-1). 
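This rewrite uses the integer identity x < C  <=>  !(x > C - 1), which holds whenever C - 1 does not underflow (the immediate predicate guarantees a representable constant, and DEC_CONST_SIGNED performs the C - 1 step at selection time). A quick self-contained C++ check of the equivalence (names are illustrative):

  #include <cassert>
  #include <cstdint>
  #include <initializer_list>

  bool setlt(int32_t x, int32_t c)         { return x < c; }
  bool not_setgt_dec(int32_t x, int32_t c) { return !(x > c - 1); }

  int main() {
    for (int32_t x : {-7, -1, 0, 1, 41, 42, 43})
      for (int32_t c : {-3, 0, 5, 42})   // constants far from INT32_MIN
        assert(setlt(x, c) == not_setgt_dec(x, c));
    return 0;
  }
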
-def: Pat<(i1 (setlt (i32 IntRegs:$src1), s8ExtPred:$src2)), - (C4_cmpltei IntRegs:$src1, (DEC_CONST_SIGNED s8ExtPred:$src2))>; +def: Pat<(i1 (setlt (i32 IntRegs:$src1), s32ImmPred:$src2)), + (C4_cmpltei IntRegs:$src1, (DEC_CONST_SIGNED s32ImmPred:$src2))>; // rs != rt -> !(rs == rt). -def: Pat<(i1 (setne (i32 IntRegs:$src1), s10ExtPred:$src2)), - (C4_cmpneqi IntRegs:$src1, s10ExtPred:$src2)>; +def: Pat<(i1 (setne (i32 IntRegs:$src1), s32ImmPred:$src2)), + (C4_cmpneqi IntRegs:$src1, s32ImmPred:$src2)>; // SDNode for converting immediate C to C-1. def DEC_CONST_BYTE : SDNodeXFormgetSExtValue(); - return XformU7ToU7M1Imm(imm); + return XformU7ToU7M1Imm(imm, SDLoc(N)); }]>; -// For the sequence -// zext( seteq ( and(Rs, 255), u8)) -// Generate -// Pd=cmpb.eq(Rs, #u8) -// if (Pd.new) Rd=#1 -// if (!Pd.new) Rd=#0 -def : Pat <(i32 (zext (i1 (seteq (i32 (and (i32 IntRegs:$Rs), 255)), - u8ExtPred:$u8)))), - (i32 (TFR_condset_ii (i1 (A4_cmpbeqi (i32 IntRegs:$Rs), - (u8ExtPred:$u8))), - 1, 0))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setne ( and(Rs, 255), u8)) -// Generate -// Pd=cmpb.eq(Rs, #u8) -// if (Pd.new) Rd=#0 -// if (!Pd.new) Rd=#1 -def : Pat <(i32 (zext (i1 (setne (i32 (and (i32 IntRegs:$Rs), 255)), - u8ExtPred:$u8)))), - (i32 (TFR_condset_ii (i1 (A4_cmpbeqi (i32 IntRegs:$Rs), - (u8ExtPred:$u8))), - 0, 1))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( seteq (Rs, and(Rt, 255))) -// Generate -// Pd=cmpb.eq(Rs, Rt) -// if (Pd.new) Rd=#1 -// if (!Pd.new) Rd=#0 -def : Pat <(i32 (zext (i1 (seteq (i32 IntRegs:$Rt), - (i32 (and (i32 IntRegs:$Rs), 255)))))), - (i32 (TFR_condset_ii (i1 (A4_cmpbeq (i32 IntRegs:$Rs), - (i32 IntRegs:$Rt))), - 1, 0))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setne (Rs, and(Rt, 255))) -// Generate -// Pd=cmpb.eq(Rs, Rt) -// if (Pd.new) Rd=#0 -// if (!Pd.new) Rd=#1 -def : Pat <(i32 (zext (i1 (setne (i32 IntRegs:$Rt), - (i32 (and (i32 IntRegs:$Rs), 255)))))), - (i32 (TFR_condset_ii (i1 (A4_cmpbeq (i32 IntRegs:$Rs), - (i32 IntRegs:$Rt))), - 0, 1))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setugt ( and(Rs, 255), u8)) -// Generate -// Pd=cmpb.gtu(Rs, #u8) -// if (Pd.new) Rd=#1 -// if (!Pd.new) Rd=#0 -def : Pat <(i32 (zext (i1 (setugt (i32 (and (i32 IntRegs:$Rs), 255)), - u8ExtPred:$u8)))), - (i32 (TFR_condset_ii (i1 (A4_cmpbgtui (i32 IntRegs:$Rs), - (u8ExtPred:$u8))), - 1, 0))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setugt ( and(Rs, 254), u8)) -// Generate -// Pd=cmpb.gtu(Rs, #u8) -// if (Pd.new) Rd=#1 -// if (!Pd.new) Rd=#0 -def : Pat <(i32 (zext (i1 (setugt (i32 (and (i32 IntRegs:$Rs), 254)), - u8ExtPred:$u8)))), - (i32 (TFR_condset_ii (i1 (A4_cmpbgtui (i32 IntRegs:$Rs), - (u8ExtPred:$u8))), - 1, 0))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setult ( Rs, Rt)) -// Generate -// Pd=cmp.ltu(Rs, Rt) -// if (Pd.new) Rd=#1 -// if (!Pd.new) Rd=#0 -// cmp.ltu(Rs, Rt) -> cmp.gtu(Rt, Rs) -def : Pat <(i32 (zext (i1 (setult (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))), - (i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rt), - (i32 IntRegs:$Rs))), - 1, 0))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setlt ( Rs, Rt)) -// Generate -// Pd=cmp.lt(Rs, Rt) -// if (Pd.new) Rd=#1 -// if (!Pd.new) Rd=#0 -// cmp.lt(Rs, Rt) -> cmp.gt(Rt, Rs) -def : Pat <(i32 (zext (i1 (setlt (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))), - (i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rt), - (i32 IntRegs:$Rs))), - 1, 0))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setugt ( Rs, Rt)) -// Generate -// 
Pd=cmp.gtu(Rs, Rt) -// if (Pd.new) Rd=#1 -// if (!Pd.new) Rd=#0 -def : Pat <(i32 (zext (i1 (setugt (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))), - (i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rs), - (i32 IntRegs:$Rt))), - 1, 0))>, - Requires<[HasV4T]>; - -// This pattern interefers with coremark performance, not implementing at this -// time. -// For the sequence -// zext( setgt ( Rs, Rt)) -// Generate -// Pd=cmp.gt(Rs, Rt) -// if (Pd.new) Rd=#1 -// if (!Pd.new) Rd=#0 - -// For the sequence -// zext( setuge ( Rs, Rt)) -// Generate -// Pd=cmp.ltu(Rs, Rt) -// if (Pd.new) Rd=#0 -// if (!Pd.new) Rd=#1 -// cmp.ltu(Rs, Rt) -> cmp.gtu(Rt, Rs) -def : Pat <(i32 (zext (i1 (setuge (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))), - (i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rt), - (i32 IntRegs:$Rs))), - 0, 1))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setge ( Rs, Rt)) -// Generate -// Pd=cmp.lt(Rs, Rt) -// if (Pd.new) Rd=#0 -// if (!Pd.new) Rd=#1 -// cmp.lt(Rs, Rt) -> cmp.gt(Rt, Rs) -def : Pat <(i32 (zext (i1 (setge (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))), - (i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rt), - (i32 IntRegs:$Rs))), - 0, 1))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setule ( Rs, Rt)) -// Generate -// Pd=cmp.gtu(Rs, Rt) -// if (Pd.new) Rd=#0 -// if (!Pd.new) Rd=#1 -def : Pat <(i32 (zext (i1 (setule (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))), - (i32 (TFR_condset_ii (i1 (C2_cmpgtu (i32 IntRegs:$Rs), - (i32 IntRegs:$Rt))), - 0, 1))>, - Requires<[HasV4T]>; - -// For the sequence -// zext( setle ( Rs, Rt)) -// Generate -// Pd=cmp.gt(Rs, Rt) -// if (Pd.new) Rd=#0 -// if (!Pd.new) Rd=#1 -def : Pat <(i32 (zext (i1 (setle (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))), - (i32 (TFR_condset_ii (i1 (C2_cmpgt (i32 IntRegs:$Rs), - (i32 IntRegs:$Rt))), - 0, 1))>, - Requires<[HasV4T]>; - // For the sequence // zext( setult ( and(Rs, 255), u8)) // Use the isdigit transformation below -// Generate code of the form 'mux_ii(cmpbgtu(Rdd, C-1),0,1)' +// Generate code of the form 'C2_muxii(cmpbgtui(Rdd, C-1),0,1)' // for C code of the form r = ((c>='0') & (c<='9')) ? 1 : 0;. // The isdigit transformation relies on two 'clever' aspects: // 1) The data type is unsigned which allows us to eliminate a zero test after @@ -3239,12 +3234,11 @@ def : Pat <(i32 (zext (i1 (setle (i32 IntRegs:$Rs), (i32 IntRegs:$Rt))))), // The code is transformed upstream of llvm into // retval = (c-48) < 10 ? 1 : 0; let AddedComplexity = 139 in -def : Pat <(i32 (zext (i1 (setult (i32 (and (i32 IntRegs:$src1), 255)), - u7StrictPosImmPred:$src2)))), - (i32 (C2_muxii (i1 (A4_cmpbgtui (i32 IntRegs:$src1), - (DEC_CONST_BYTE u7StrictPosImmPred:$src2))), - 0, 1))>, - Requires<[HasV4T]>; +def: Pat<(i32 (zext (i1 (setult (i32 (and (i32 IntRegs:$src1), 255)), + u7StrictPosImmPred:$src2)))), + (C2_muxii (A4_cmpbgtui IntRegs:$src1, + (DEC_CONST_BYTE u7StrictPosImmPred:$src2)), + 0, 1)>; //===----------------------------------------------------------------------===// // XTYPE/PRED - @@ -3301,51 +3295,40 @@ multiclass LD_MISC_L4_RETURN { } let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R30], hasSideEffects = 0, - validSubTargets = HasV4SubT, isCodeGenOnly = 0 in + Defs = [R29, R30, R31, PC], Uses = [R30], hasSideEffects = 0 in defm L4_return: LD_MISC_L4_RETURN <"dealloc_return">, PredNewRel; // Restore registers and dealloc return function call. 
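A short illustration of the isdigit rewrite described in the comment block above: the two byte comparisons collapse into a single unsigned range check, which is what lets one cmpb.gtu against C-1 cover both bounds. Self-contained C++ (function names are invented for the example):

  #include <cassert>

  // Source shape:   r = ((c >= '0') & (c <= '9')) ? 1 : 0;
  int isdigit_naive(unsigned char c) { return (c >= '0') & (c <= '9'); }

  // Rewritten upstream of the backend into one unsigned comparison:
  //                 retval = (c - 48) < 10 ? 1 : 0;
  int isdigit_rangecheck(unsigned char c) {
    return (unsigned)(c - 48) < 10u ? 1 : 0;
  }

  int main() {
    for (int c = 0; c < 256; ++c)
      assert(isdigit_naive((unsigned char)c) ==
             isdigit_rangecheck((unsigned char)c));
    return 0;
  }
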
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], isAsmParserOnly = 1 in { -let validSubTargets = HasV4SubT in - def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs), - (ins calltarget:$dst), - "jump $dst", - []>, - Requires<[HasV4T]>; + Defs = [R29, R30, R31, PC], isPredicable = 0, isAsmParserOnly = 1 in { + def RESTORE_DEALLOC_RET_JMP_V4 : T_JMP<"">; + let isExtended = 1, opExtendable = 0 in + def RESTORE_DEALLOC_RET_JMP_V4_EXT : T_JMP<"">; } // Restore registers and dealloc frame before a tail call. -let isCall = 1, isBarrier = 1, isAsmParserOnly = 1, - Defs = [R29, R30, R31, PC] in { -let validSubTargets = HasV4SubT in - def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs), - (ins calltarget:$dst), - "call $dst", - []>, - Requires<[HasV4T]>; +let isCall = 1, Defs = [R29, R30, R31, PC], isAsmParserOnly = 1 in { + def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : T_Call<"">, PredRel; + let isExtended = 1, opExtendable = 0 in + def RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT : T_Call<"">, PredRel; } // Save registers function call. -let isCall = 1, isBarrier = 1, isAsmParserOnly = 1, - Uses = [R29, R31] in { - def SAVE_REGISTERS_CALL_V4 : JInst<(outs), - (ins calltarget:$dst), - "call $dst // Save_calle_saved_registers", - []>, - Requires<[HasV4T]>; +let isCall = 1, Uses = [R29, R31], isAsmParserOnly = 1 in { + def SAVE_REGISTERS_CALL_V4 : T_Call<"">, PredRel; + let isExtended = 1, opExtendable = 0 in + def SAVE_REGISTERS_CALL_V4_EXT : T_Call<"">, PredRel; } //===----------------------------------------------------------------------===// // Template class for non predicated store instructions with // GP-Relative or absolute addressing. //===----------------------------------------------------------------------===// -let hasSideEffects = 0, isPredicable = 1, isNVStorable = 1 in +let hasSideEffects = 0, isPredicable = 1 in class T_StoreAbsGP MajOp, Operand AddrOp, bit isAbs, bit isHalf> - : STInst<(outs), (ins AddrOp:$addr, RC:$src), - mnemonic # !if(isAbs, "(##", "(#")#"$addr) = $src"#!if(isHalf, ".h",""), + bits<2>MajOp, bit isAbs, bit isHalf> + : STInst<(outs), (ins ImmOp:$addr, RC:$src), + mnemonic # "(#$addr) = $src"#!if(isHalf, ".h",""), [], "", V2LDST_tc_st_SLOT01> { bits<19> addr; bits<5> src; @@ -3356,6 +3339,9 @@ class T_StoreAbsGP MajOp, bit isHalf, bit isNot, bit isNew> - : STInst<(outs), (ins PredRegs:$src1, u6Ext:$absaddr, RC: $src2), + : STInst<(outs), (ins PredRegs:$src1, u32MustExt:$absaddr, RC: $src2), !if(isNot, "if (!$src1", "if ($src1")#!if(isNew, ".new) ", ") ")#mnemonic#"(#$absaddr) = $src2"#!if(isHalf, ".h",""), [], "", ST_tc_st_SLOT01>, AddrModeRel { @@ -3386,6 +3371,8 @@ class T_StoreAbs_Pred MajOp, let isPredicatedNew = isNew; let isPredicatedFalse = isNot; + // Store upper-half and store doubleword cannot be NV. 
+ let isNVStorable = !if (!eq(mnemonic, "memd"), 0, !if(isHalf,0,1)); let IClass = 0b1010; @@ -3406,7 +3393,7 @@ class T_StoreAbs_Pred MajOp, //===----------------------------------------------------------------------===// class T_StoreAbs MajOp, bit isHalf> - : T_StoreAbsGP , + : T_StoreAbsGP , AddrModeRel { string ImmOpStr = !cast(ImmOp); let opExtentBits = !if (!eq(ImmOpStr, "u16_3Imm"), 19, @@ -3423,7 +3410,7 @@ class T_StoreAbs MajOp, bit isHalf = 0> { let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in { @@ -3447,7 +3434,7 @@ multiclass ST_AbsMajOp, bit isAbs> - : NVInst_V4<(outs), (ins u0AlwaysExt:$addr, IntRegs:$src), + : NVInst_V4<(outs), (ins u32Imm:$addr, IntRegs:$src), mnemonic # !if(isAbs, "(##", "(#")#"$addr) = $src.new", [], "", V2LDST_tc_st_SLOT0> { bits<19> addr; @@ -3525,7 +3512,7 @@ class T_StoreAbs_NV MajOp> //===----------------------------------------------------------------------===// // Multiclass for new-value store instructions with absolute addressing. //===----------------------------------------------------------------------===// -let validSubTargets = HasV4SubT, addrMode = Absolute, isExtended = 1 in +let addrMode = Absolute, isExtended = 1 in multiclass ST_Abs_NV MajOp> { let CextOpcode = CextOp, BaseOpcode = CextOp#_abs in { @@ -3545,22 +3532,22 @@ multiclass ST_Abs_NV , ST_Abs_NV <"memb", "STrib", u16_0Imm, 0b00>; -let accessSize = HalfWordAccess, isCodeGenOnly = 0 in +let accessSize = HalfWordAccess in defm storerh : ST_Abs <"memh", "STrih", IntRegs, u16_1Imm, 0b01>, ST_Abs_NV <"memh", "STrih", u16_1Imm, 0b01>; -let accessSize = WordAccess, isCodeGenOnly = 0 in +let accessSize = WordAccess in defm storeri : ST_Abs <"memw", "STriw", IntRegs, u16_2Imm, 0b10>, ST_Abs_NV <"memw", "STriw", u16_2Imm, 0b10>; -let isNVStorable = 0, accessSize = DoubleWordAccess, isCodeGenOnly = 0 in +let isNVStorable = 0, accessSize = DoubleWordAccess in defm storerd : ST_Abs <"memd", "STrid", DoubleRegs, u16_3Imm, 0b11>; -let isNVStorable = 0, accessSize = HalfWordAccess, isCodeGenOnly = 0 in +let isNVStorable = 0, accessSize = HalfWordAccess in defm storerf : ST_Abs <"memh", "STrif", IntRegs, u16_1Imm, 0b01, 1>; //===----------------------------------------------------------------------===// @@ -3570,17 +3557,17 @@ defm storerf : ST_Abs <"memh", "STrif", IntRegs, u16_1Imm, 0b01, 1>; // if ([!]Pv[.new]) mem[bhwd](##global)=Rt //===----------------------------------------------------------------------===// -let validSubTargets = HasV4SubT, isAsmParserOnly = 1 in +let isAsmParserOnly = 1 in class T_StoreGP MajOp, bit isHalf = 0> - : T_StoreAbsGP { + : T_StoreAbsGP { // Set BaseOpcode same as absolute addressing instructions so that // non-predicated GP-Rel instructions can have relate with predicated // Absolute instruction. let BaseOpcode = BaseOp#_abs; } -let validSubTargets = HasV4SubT, isAsmParserOnly = 1 in +let isAsmParserOnly = 1 in multiclass ST_GP MajOp, bit isHalf = 0> { // Set BaseOpcode same as absolute addressing instructions so that @@ -3588,7 +3575,7 @@ multiclass ST_GP ; + 0, isHalf>; // New-value store def NAME#newgp : T_StoreAbsGP_NV ; } @@ -3648,11 +3635,11 @@ let AddedComplexity = 100 in { // Template class for non predicated load instructions with // absolute addressing mode. 
//===----------------------------------------------------------------------===// -let isPredicable = 1, hasSideEffects = 0, validSubTargets = HasV4SubT in +let isPredicable = 1, hasSideEffects = 0 in class T_LoadAbsGP MajOp, Operand AddrOp, bit isAbs> - : LDInst <(outs RC:$dst), (ins AddrOp:$addr), - "$dst = "#mnemonic# !if(isAbs, "(##", "(#")#"$addr)", + bits<3> MajOp> + : LDInst <(outs RC:$dst), (ins ImmOp:$addr), + "$dst = "#mnemonic# "(#$addr)", [], "", V2LDST_tc_ld_SLOT01> { bits<5> dst; bits<19> addr; @@ -3677,7 +3664,7 @@ class T_LoadAbsGP MajOp> - : T_LoadAbsGP , AddrModeRel { + : T_LoadAbsGP , AddrModeRel { string ImmOpStr = !cast(ImmOp); let opExtentBits = !if (!eq(ImmOpStr, "u16_3Imm"), 19, @@ -3690,14 +3677,16 @@ class T_LoadAbs MajOp, bit isPredNot, bit isPredNew> - : LDInst <(outs RC:$dst), (ins PredRegs:$src1, u6Ext:$absaddr), + : LDInst <(outs RC:$dst), (ins PredRegs:$src1, u32MustExt:$absaddr), !if(isPredNot, "if (!$src1", "if ($src1")#!if(isPredNew, ".new) ", ") ")#"$dst = "#mnemonic#"(#$absaddr)">, AddrModeRel { bits<5> dst; @@ -3706,6 +3695,7 @@ class T_LoadAbs_Pred MajOp, let isPredicatedNew = isPredNew; let isPredicatedFalse = isPredNot; + let hasNewValue = !if (!eq(!cast(RC), "DoubleRegs"), 0, 1); let IClass = 0b1001; @@ -3744,20 +3734,20 @@ multiclass LD_Abs; defm loadrub : LD_Abs<"memub", "LDriub", IntRegs, u16_0Imm, 0b001>; } -let accessSize = HalfWordAccess, hasNewValue = 1, isCodeGenOnly = 0 in { +let accessSize = HalfWordAccess, hasNewValue = 1 in { defm loadrh : LD_Abs<"memh", "LDrih", IntRegs, u16_1Imm, 0b010>; defm loadruh : LD_Abs<"memuh", "LDriuh", IntRegs, u16_1Imm, 0b011>; } -let accessSize = WordAccess, hasNewValue = 1, isCodeGenOnly = 0 in +let accessSize = WordAccess, hasNewValue = 1 in defm loadri : LD_Abs<"memw", "LDriw", IntRegs, u16_2Imm, 0b100>; -let accessSize = DoubleWordAccess, isCodeGenOnly = 0 in +let accessSize = DoubleWordAccess in defm loadrd : LD_Abs<"memd", "LDrid", DoubleRegs, u16_3Imm, 0b110>; //===----------------------------------------------------------------------===// @@ -3770,7 +3760,7 @@ defm loadrd : LD_Abs<"memd", "LDrid", DoubleRegs, u16_3Imm, 0b110>; let isAsmParserOnly = 1 in class T_LoadGP MajOp> - : T_LoadAbsGP , PredNewRel { + : T_LoadAbsGP , PredNewRel { let BaseOpcode = BaseOp#_abs; } @@ -3795,6 +3785,13 @@ def: Loada_pat; def: Loada_pat; def: Loada_pat; +// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd +def: Loadam_pat; +def: Loadam_pat; + +def: Stoream_pat; +def: Stoream_pat; + // Map from load(globaladdress) -> mem[u][bhwd](#foo) class LoadGP_pats : Pat <(VT (ldOp (HexagonCONST32_GP tglobaladdr:$global))), @@ -3819,21 +3816,23 @@ let AddedComplexity = 100 in { def: LoadGP_pats ; } -def: Pat<(i64 (ctlz I64:$src1)), (Zext64 (S2_cl0p I64:$src1))>; -def: Pat<(i64 (cttz I64:$src1)), (Zext64 (S2_ct0p I64:$src1))>; +// Transfer global address into a register +def: Pat<(HexagonCONST32 tglobaladdr:$Rs), (A2_tfrsi s16Ext:$Rs)>; +def: Pat<(HexagonCONST32_GP tblockaddress:$Rs), (A2_tfrsi s16Ext:$Rs)>; +def: Pat<(HexagonCONST32_GP tglobaladdr:$Rs), (A2_tfrsi s16Ext:$Rs)>; let AddedComplexity = 30 in { - def: Storea_pat; - def: Storea_pat; - def: Storea_pat; + def: Storea_pat; + def: Storea_pat; + def: Storea_pat; } let AddedComplexity = 30 in { - def: Loada_pat; - def: Loada_pat; - def: Loada_pat; - def: Loada_pat; - def: Loada_pat; + def: Loada_pat; + def: Loada_pat; + def: Loada_pat; + def: Loada_pat; + def: Loada_pat; } // Indexed store word - global address. 
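The Loadam_pat/Stoream_pat entries above spell out how i1 values reach memory: there is no one-bit load, so a predicate is loaded as a byte and then transferred into a predicate register, and stored by the reverse sequence. A C++ sketch of that expansion (names are invented, and the nonzero test merely stands in for the predicate transfer):

  #include <cstdint>

  // Pd = load(global)  ->  Rd = memb(#global) ; Pd = Rd
  bool load_i1(const uint8_t *global) {
    uint8_t rd = *global;    // Rd = memb(#global)
    return rd != 0;          // Pd = Rd
  }

  // store i1 Pd         ->  Rd = Pd ; memb(#global) = Rd
  void store_i1(uint8_t *global, bool pd) {
    uint8_t rd = pd ? 1 : 0; // Rd = Pd
    *global = rd;            // memb(#global) = Rd
  }
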
@@ -3872,22 +3871,18 @@ def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))), // Transfer global address into a register let isExtended = 1, opExtendable = 1, AddedComplexity=50, isMoveImm = 1, -isAsCheapAsAMove = 1, isReMaterializable = 1, validSubTargets = HasV4SubT, -isCodeGenOnly = 1 in +isAsCheapAsAMove = 1, isReMaterializable = 1, isCodeGenOnly = 1 in def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins s16Ext:$src1), "$dst = #$src1", - [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>, - Requires<[HasV4T]>; + [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>; // Transfer a block address into a register def : Pat<(HexagonCONST32_GP tblockaddress:$src1), - (TFRI_V4 tblockaddress:$src1)>, - Requires<[HasV4T]>; + (TFRI_V4 tblockaddress:$src1)>; -let AddedComplexity = 50, Predicates = [HasV4T] in +let AddedComplexity = 50 in def : Pat<(HexagonCONST32_GP tglobaladdr:$src1), - (TFRI_V4 tglobaladdr:$src1)>, - Requires<[HasV4T]>; + (TFRI_V4 tglobaladdr:$src1)>; // i8/i16/i32 -> i64 loads // We need a complexity of 120 here to override preceding handling of @@ -3904,6 +3899,7 @@ let AddedComplexity = 120 in { def: Loadam_pat; def: Loadam_pat; def: Loadam_pat; +} let AddedComplexity = 100 in { def: Loada_pat; @@ -3917,7 +3913,7 @@ let AddedComplexity = 100 in { def: Loada_pat; def: Loada_pat; } -} + let AddedComplexity = 100 in { def: Storea_pat; def: Storea_pat; @@ -3935,12 +3931,24 @@ def: Storea_pat, I32, addrgp, S2_storerhabs>; def: Storea_pat, I32, addrgp, S2_storeriabs>; def: Storea_pat, I64, addrgp, S2_storerdabs>; +let Constraints = "@earlyclobber $dst" in +def Insert4 : PseudoM<(outs DoubleRegs:$dst), (ins IntRegs:$a, IntRegs:$b, + IntRegs:$c, IntRegs:$d), + ".error \"Should never try to emit Insert4\"", + [(set (i64 DoubleRegs:$dst), + (or (or (or (shl (i64 (zext (i32 (and (i32 IntRegs:$b), (i32 65535))))), + (i32 16)), + (i64 (zext (i32 (and (i32 IntRegs:$a), (i32 65535)))))), + (shl (i64 (anyext (i32 (and (i32 IntRegs:$c), (i32 65535))))), + (i32 32))), + (shl (i64 (anyext (i32 IntRegs:$d))), (i32 48))))]>; + //===----------------------------------------------------------------------===// // :raw for of boundscheck:hi:lo insns //===----------------------------------------------------------------------===// // A4_boundscheck_lo: Detect if a register is within bounds. -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def A4_boundscheck_lo: ALU64Inst < (outs PredRegs:$Pd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), @@ -3960,7 +3968,7 @@ def A4_boundscheck_lo: ALU64Inst < } // A4_boundscheck_hi: Detect if a register is within bounds. -let hasSideEffects = 0, isCodeGenOnly = 0 in +let hasSideEffects = 0 in def A4_boundscheck_hi: ALU64Inst < (outs PredRegs:$Pd), (ins DoubleRegs:$Rss, DoubleRegs:$Rtt), @@ -3985,7 +3993,7 @@ def A4_boundscheck : MInst < "$Pd=boundscheck($Rs,$Rtt)">; // A4_tlbmatch: Detect if a VA/ASID matches a TLB entry. -let isPredicateLate = 1, hasSideEffects = 0, isCodeGenOnly = 0 in +let isPredicateLate = 1, hasSideEffects = 0 in def A4_tlbmatch : ALU64Inst<(outs PredRegs:$Pd), (ins DoubleRegs:$Rs, IntRegs:$Rt), "$Pd = tlbmatch($Rs, $Rt)", @@ -4012,7 +4020,7 @@ def HexagonDCFETCH : SDNode<"HexagonISD::DCFETCH", SDTHexagonDCFETCH, // Use LD0Inst for dcfetch, but set "mayLoad" to 0 because this doesn't // really do a load. 
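Y2_dcfetchbo below is the data-cache prefetch: it warms the cache but produces no register result, which is why mayLoad is cleared even though an LD-class slot is used. Hints like this normally originate from the prefetch intrinsic (__builtin_prefetch in C/C++); whether a given LLVM revision routes that intrinsic through HexagonDCFETCH is not shown in this diff, so treat the mapping as illustrative. A typical source-level use:

  #include <cstddef>

  // Prefetch a cache line a few iterations ahead of its use.
  // __builtin_prefetch(addr, rw, locality) is a GCC/Clang builtin:
  // rw = 0 requests a read prefetch, locality 0..3 hints expected reuse.
  long sum(const long *a, size_t n) {
    long s = 0;
    for (size_t i = 0; i < n; ++i) {
      if (i + 16 < n)
        __builtin_prefetch(&a[i + 16], 0, 1);
      s += a[i];
    }
    return s;
  }
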
-let hasSideEffects = 1, mayLoad = 0, isCodeGenOnly = 0 in +let hasSideEffects = 1, mayLoad = 0 in def Y2_dcfetchbo : LD0Inst<(outs), (ins IntRegs:$Rs, u11_3Imm:$u11_3), "dcfetch($Rs + #$u11_3)", [(HexagonDCFETCH IntRegs:$Rs, u11_3ImmPred:$u11_3)], @@ -4034,12 +4042,12 @@ def Y2_dcfetchbo : LD0Inst<(outs), (ins IntRegs:$Rs, u11_3Imm:$u11_3), let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1, isPredicated = 1, isPredicatedNew = 1, isExtendable = 1, opExtentBits = 11, opExtentAlign = 2, opExtendable = 1, - isTerminator = 1, validSubTargets = HasV4SubT in + isTerminator = 1 in class CJInst_tstbit_R0 : InstHexagon<(outs), (ins IntRegs:$Rs, brtarget:$r9_2), ""#px#" = tstbit($Rs, #0); if (" #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2", - [], "", COMPOUND, TypeCOMPOUND> { + [], "", COMPOUND, TypeCOMPOUND>, OpcodeHexagon { bits<4> Rs; bits<11> r9_2; @@ -4062,14 +4070,14 @@ class CJInst_tstbit_R0 let Inst{7-1} = r9_2{8-2}; } -let Defs = [PC, P0], Uses = [P0], isCodeGenOnly = 0 in { +let Defs = [PC, P0], Uses = [P0] in { def J4_tstbit0_tp0_jump_nt : CJInst_tstbit_R0<"p0", 0, "nt">; def J4_tstbit0_tp0_jump_t : CJInst_tstbit_R0<"p0", 0, "t">; def J4_tstbit0_fp0_jump_nt : CJInst_tstbit_R0<"p0", 1, "nt">; def J4_tstbit0_fp0_jump_t : CJInst_tstbit_R0<"p0", 1, "t">; } -let Defs = [PC, P1], Uses = [P1], isCodeGenOnly = 0 in { +let Defs = [PC, P1], Uses = [P1] in { def J4_tstbit0_tp1_jump_nt : CJInst_tstbit_R0<"p1", 0, "nt">; def J4_tstbit0_tp1_jump_t : CJInst_tstbit_R0<"p1", 0, "t">; def J4_tstbit0_fp1_jump_nt : CJInst_tstbit_R0<"p1", 1, "nt">; @@ -4080,12 +4088,12 @@ let Defs = [PC, P1], Uses = [P1], isCodeGenOnly = 0 in { let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1, isPredicated = 1, isPredicatedNew = 1, isExtendable = 1, opExtentBits = 11, opExtentAlign = 2, - opExtendable = 2, isTerminator = 1, validSubTargets = HasV4SubT in + opExtendable = 2, isTerminator = 1 in class CJInst_RR : InstHexagon<(outs), (ins IntRegs:$Rs, IntRegs:$Rt, brtarget:$r9_2), ""#px#" = cmp."#op#"($Rs, $Rt); if (" #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2", - [], "", COMPOUND, TypeCOMPOUND> { + [], "", COMPOUND, TypeCOMPOUND>, OpcodeHexagon { bits<4> Rs; bits<4> Rt; bits<11> r9_2; @@ -4128,21 +4136,18 @@ multiclass T_pnp_CJInst_RR{ defm J4_cmp#NAME#_f : T_tnt_CJInst_RR; } // TypeCJ Instructions compare RR and jump -let isCodeGenOnly = 0 in { defm eq : T_pnp_CJInst_RR<"eq">; defm gt : T_pnp_CJInst_RR<"gt">; defm gtu : T_pnp_CJInst_RR<"gtu">; -} let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1, isPredicated = 1, isPredicatedNew = 1, isExtendable = 1, opExtentBits = 11, - opExtentAlign = 2, opExtendable = 2, isTerminator = 1, - validSubTargets = HasV4SubT in + opExtentAlign = 2, opExtendable = 2, isTerminator = 1 in class CJInst_RU5 : InstHexagon<(outs), (ins IntRegs:$Rs, u5Imm:$U5, brtarget:$r9_2), ""#px#" = cmp."#op#"($Rs, #$U5); if (" #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2", - [], "", COMPOUND, TypeCOMPOUND> { + [], "", COMPOUND, TypeCOMPOUND>, OpcodeHexagon { bits<4> Rs; bits<5> U5; bits<11> r9_2; @@ -4185,21 +4190,19 @@ multiclass T_pnp_CJInst_RU5{ defm J4_cmp#NAME#i_f : T_tnt_CJInst_RU5; } // TypeCJ Instructions compare RI and jump -let isCodeGenOnly = 0 in { defm eq : T_pnp_CJInst_RU5<"eq">; defm gt : T_pnp_CJInst_RU5<"gt">; defm gtu : T_pnp_CJInst_RU5<"gtu">; -} let isBranch = 1, hasSideEffects = 0, isExtentSigned = 1, isPredicated = 1, isPredicatedFalse = 1, isPredicatedNew = 1, isExtendable = 1, opExtentBits = 11, opExtentAlign = 2, opExtendable = 1, - isTerminator = 1, 
validSubTargets = HasV4SubT in + isTerminator = 1 in class CJInst_Rn1 : InstHexagon<(outs), (ins IntRegs:$Rs, brtarget:$r9_2), ""#px#" = cmp."#op#"($Rs,#-1); if (" #!if(np, "!","")#""#px#".new) jump:"#tnt#" $r9_2", - [], "", COMPOUND, TypeCOMPOUND> { + [], "", COMPOUND, TypeCOMPOUND>, OpcodeHexagon { bits<4> Rs; bits<11> r9_2; @@ -4241,16 +4244,13 @@ multiclass T_pnp_CJInst_Rn1{ defm J4_cmp#NAME#n1_f : T_tnt_CJInst_Rn1; } // TypeCJ Instructions compare -1 and jump -let isCodeGenOnly = 0 in { defm eq : T_pnp_CJInst_Rn1<"eq">; defm gt : T_pnp_CJInst_Rn1<"gt">; -} // J4_jumpseti: Direct unconditional jump and set register to immediate. let Defs = [PC], isBranch = 1, hasSideEffects = 0, hasNewValue = 1, isExtentSigned = 1, opNewValue = 0, isExtendable = 1, opExtentBits = 11, - opExtentAlign = 2, opExtendable = 2, validSubTargets = HasV4SubT, - isCodeGenOnly = 0 in + opExtentAlign = 2, opExtendable = 2 in def J4_jumpseti: CJInst < (outs IntRegs:$Rd), (ins u6Imm:$U6, brtarget:$r9_2), @@ -4270,8 +4270,7 @@ def J4_jumpseti: CJInst < // J4_jumpsetr: Direct unconditional jump and transfer register. let Defs = [PC], isBranch = 1, hasSideEffects = 0, hasNewValue = 1, isExtentSigned = 1, opNewValue = 0, isExtendable = 1, opExtentBits = 11, - opExtentAlign = 2, opExtendable = 2, validSubTargets = HasV4SubT, - isCodeGenOnly = 0 in + opExtentAlign = 2, opExtendable = 2 in def J4_jumpsetr: CJInst < (outs IntRegs:$Rd), (ins IntRegs:$Rs, brtarget:$r9_2), @@ -4287,3 +4286,7 @@ def J4_jumpsetr: CJInst < let Inst{19-16} = Rs; let Inst{7-1} = r9_2{8-2}; } + +// Duplex instructions +//===----------------------------------------------------------------------===// +include "HexagonIsetDx.td"
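
Looking back at J4_jumpseti and J4_jumpsetr above: each fuses a register set (Rd = #U6 or Rd = Rs) with a direct jump into one compound instruction. Roughly the control-flow shape they target, sketched in C++ (the function and values are made up for the example):

  // Two predecessors set the same register and branch to a shared block:
  //   rd = 42 ; jump common      -> J4_jumpseti
  //   rd = rs ; jump common      -> J4_jumpsetr
  int select_and_dispatch(bool use_const, int rs) {
    int rd;
    if (use_const) { rd = 42; goto common; }
    rd = rs;
  common:
    return rd * 2;
  }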