// All records in this braced region have isCodeGenOnly = 1, i.e. they are
// codegen-only instruction variants (not offered to the assembly parser).
// NOTE(review): the _MM suffixes, MMRel base class, and *_FM_MM format
// classes suggest these are microMIPS encodings of the matching MIPS32
// instructions — confirm against the MMRel relation tables in this file.
1 let isCodeGenOnly = 1 in {
2 /// Arithmetic Instructions (ALU Immediate)
// Add-immediate forms; the simm16 operand class indicates a signed 16-bit
// immediate. ADDiu is the non-trapping (unsigned-overflow) variant.
// NOTE(review): the trailing format/encoding class for several defs below
// sits on a continuation line not shown here.
3 def ADDiu_MM : MMRel, ArithLogicI<"addiu", simm16, CPURegsOpnd>,
4 def ADDi_MM : MMRel, ArithLogicI<"addi", simm16, CPURegsOpnd>,
// Set-on-less-than immediate: selected via the setlt / setult SDNodes with
// the immSExt16 immediate predicate (sign-extended 16-bit immediate).
5 def SLTi_MM : MMRel, SetCC_I<"slti", setlt, simm16, immSExt16, CPURegs>,
6 def SLTiu_MM : MMRel, SetCC_I<"sltiu", setult, simm16, immSExt16, CPURegs>,
// Bitwise-logical immediates: uimm16 operand with the immZExt16 predicate
// (zero-extended 16-bit immediate); ORi/XORi pattern-match the or/xor nodes.
7 def ANDi_MM : MMRel, ArithLogicI<"andi", uimm16, CPURegsOpnd, immZExt16,
8 def ORi_MM : MMRel, ArithLogicI<"ori", uimm16, CPURegsOpnd, immZExt16, or>,
9 def XORi_MM : MMRel, ArithLogicI<"xori", uimm16, CPURegsOpnd, immZExt16,
// Load-upper-immediate, encoded with the LUI_FM_MM format class.
10 def LUi_MM : MMRel, LoadUpper<"lui", CPURegs, uimm16>, LUI_FM_MM;
11 /// Arithmetic Instructions (3-Operand, R-Type)
// Register-register ALU operations. ADD_FM_MM<rt, funct> supplies the
// binary encoding; the second template argument is the function field that
// distinguishes the operations (e.g. 0x150 addu vs. 0x1d0 subu).
// ADD/SUB are the (potentially trapping) signed forms; ADDu/SUBu are the
// non-trapping forms.
12 def ADDu_MM : MMRel, ArithLogicR<"addu", CPURegsOpnd>, ADD_FM_MM<0, 0x150>;
13 def SUBu_MM : MMRel, ArithLogicR<"subu", CPURegsOpnd>, ADD_FM_MM<0, 0x1d0>;
14 def MUL_MM : MMRel, ArithLogicR<"mul", CPURegsOpnd>, ADD_FM_MM<0, 0x210>;
15 def ADD_MM : MMRel, ArithLogicR<"add", CPURegsOpnd>, ADD_FM_MM<0, 0x110>;
16 def SUB_MM : MMRel, ArithLogicR<"sub", CPURegsOpnd>, ADD_FM_MM<0, 0x190>;
// Register-register set-on-less-than, selected via the setlt/setult SDNodes.
17 def SLT_MM : MMRel, SetCC_R<"slt", setlt, CPURegs>, ADD_FM_MM<0, 0x350>;
18 def SLTu_MM : MMRel, SetCC_R<"sltu", setult, CPURegs>,
// Bitwise-logical register forms, pattern-matching the and/or/xor SDNodes;
// the extra "1, IIAlu" arguments pass a flag and the ALU itinerary class to
// ArithLogicR. NOTE(review): the meaning of the literal 1 (likely the
// isCommutable bit) should be confirmed against ArithLogicR's declaration.
19 def AND_MM : MMRel, ArithLogicR<"and", CPURegsOpnd, 1, IIAlu, and>,
20 def OR_MM : MMRel, ArithLogicR<"or", CPURegsOpnd, 1, IIAlu, or>,
21 def XOR_MM : MMRel, ArithLogicR<"xor", CPURegsOpnd, 1, IIAlu, xor>,
22 def NOR_MM : MMRel, LogicNOR<"nor", CPURegsOpnd>, ADD_FM_MM<0, 0x2d0>;
// Signed/unsigned multiply; [HI, LO] names the HI and LO registers — the
// Mult class presumably records them as implicit result registers.
// IIImul is the integer-multiply itinerary class.
23 def MULT_MM : MMRel, Mult<"mult", IIImul, CPURegsOpnd, [HI, LO]>,
24 def MULTu_MM : MMRel, Mult<"multu", IIImul, CPURegsOpnd, [HI, LO]>,
25 /// Shift Instructions
// Shift/rotate by a constant amount: shift_rotate_imm takes the shamt
// (shift-amount) operand class. The encoding format classes for each def
// are on continuation lines not shown in this excerpt.
26 def SLL_MM : MMRel, shift_rotate_imm<"sll", shamt, CPURegsOpnd>,
27 def SRL_MM : MMRel, shift_rotate_imm<"srl", shamt, CPURegsOpnd>,
28 def SRA_MM : MMRel, shift_rotate_imm<"sra", shamt, CPURegsOpnd>,
// Variable-shift forms ("V" suffix): the shift amount comes from a register,
// so shift_rotate_reg takes no shamt operand.
29 def SLLV_MM : MMRel, shift_rotate_reg<"sllv", CPURegsOpnd>,
30 def SRLV_MM : MMRel, shift_rotate_reg<"srlv", CPURegsOpnd>,
31 def SRAV_MM : MMRel, shift_rotate_reg<"srav", CPURegsOpnd>,
// Rotate-right by immediate and by register, built from the same two
// shift/rotate description classes as the shifts above.
32 def ROTR_MM : MMRel, shift_rotate_imm<"rotr", shamt, CPURegsOpnd>,
33 def ROTRV_MM : MMRel, shift_rotate_reg<"rotrv", CPURegsOpnd>,
34 /// Load and Store Instructions - aligned
// defm expansion of the LoadM/StoreM multiclasses (one record per expanded
// member). The third template argument is the ISel load/store fragment:
// sextloadi8/sextloadi16 sign-extend, zextloadi8/zextloadi16 zero-extend,
// truncstorei8/truncstorei16 truncate; LW/SW pass none (full-word default).
// LW_FM_MM<op> supplies the load/store encoding with the given opcode field.
35 defm LB_MM : LoadM<"lb", CPURegs, sextloadi8>, MMRel, LW_FM_MM<0x7>;
36 defm LBu_MM : LoadM<"lbu", CPURegs, zextloadi8>, MMRel, LW_FM_MM<0x5>;
37 defm LH_MM : LoadM<"lh", CPURegs, sextloadi16>, MMRel, LW_FM_MM<0xf>;
38 defm LHu_MM : LoadM<"lhu", CPURegs, zextloadi16>, MMRel, LW_FM_MM<0xd>;
39 defm LW_MM : LoadM<"lw", CPURegs>, MMRel, LW_FM_MM<0x3f>;
40 defm SB_MM : StoreM<"sb", CPURegs, truncstorei8>, MMRel, LW_FM_MM<0x6>;
41 defm SH_MM : StoreM<"sh", CPURegs, truncstorei16>, MMRel, LW_FM_MM<0xe>;
42 defm SW_MM : StoreM<"sw", CPURegs>, MMRel, LW_FM_MM<0x3e>;