0x0000001e, "BUFFER_STORE_DWORDX4", VReg_128, v4i32, global_store
>;
-//def BUFFER_ATOMIC_SWAP : MUBUF_ <0x00000030, "BUFFER_ATOMIC_SWAP", []>;
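+// MUBUF_Atomic expands to a no-return (glc=0) variant and a returning
+// (glc=1) variant that writes the pre-op value back to $vdata.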
+defm BUFFER_ATOMIC_SWAP : MUBUF_Atomic <
+ 0x00000030, "BUFFER_ATOMIC_SWAP", VReg_32, i32, atomic_swap_global
+>;
//def BUFFER_ATOMIC_CMPSWAP : MUBUF_ <0x00000031, "BUFFER_ATOMIC_CMPSWAP", []>;
defm BUFFER_ATOMIC_ADD : MUBUF_Atomic <
0x00000032, "BUFFER_ATOMIC_ADD", VReg_32, i32, atomic_add_global
>;
-//def BUFFER_ATOMIC_SUB : MUBUF_ <0x00000033, "BUFFER_ATOMIC_SUB", []>;
+defm BUFFER_ATOMIC_SUB : MUBUF_Atomic <
+ 0x00000033, "BUFFER_ATOMIC_SUB", VReg_32, i32, atomic_sub_global
+>;
//def BUFFER_ATOMIC_RSUB : MUBUF_ <0x00000034, "BUFFER_ATOMIC_RSUB", []>;
-//def BUFFER_ATOMIC_SMIN : MUBUF_ <0x00000035, "BUFFER_ATOMIC_SMIN", []>;
-//def BUFFER_ATOMIC_UMIN : MUBUF_ <0x00000036, "BUFFER_ATOMIC_UMIN", []>;
-//def BUFFER_ATOMIC_SMAX : MUBUF_ <0x00000037, "BUFFER_ATOMIC_SMAX", []>;
-//def BUFFER_ATOMIC_UMAX : MUBUF_ <0x00000038, "BUFFER_ATOMIC_UMAX", []>;
-//def BUFFER_ATOMIC_AND : MUBUF_ <0x00000039, "BUFFER_ATOMIC_AND", []>;
-//def BUFFER_ATOMIC_OR : MUBUF_ <0x0000003a, "BUFFER_ATOMIC_OR", []>;
-//def BUFFER_ATOMIC_XOR : MUBUF_ <0x0000003b, "BUFFER_ATOMIC_XOR", []>;
+defm BUFFER_ATOMIC_SMIN : MUBUF_Atomic <
+ 0x00000035, "BUFFER_ATOMIC_SMIN", VReg_32, i32, atomic_min_global
+>;
+defm BUFFER_ATOMIC_UMIN : MUBUF_Atomic <
+ 0x00000036, "BUFFER_ATOMIC_UMIN", VReg_32, i32, atomic_umin_global
+>;
+defm BUFFER_ATOMIC_SMAX : MUBUF_Atomic <
+ 0x00000037, "BUFFER_ATOMIC_SMAX", VReg_32, i32, atomic_max_global
+>;
+defm BUFFER_ATOMIC_UMAX : MUBUF_Atomic <
+ 0x00000038, "BUFFER_ATOMIC_UMAX", VReg_32, i32, atomic_umax_global
+>;
+defm BUFFER_ATOMIC_AND : MUBUF_Atomic <
+ 0x00000039, "BUFFER_ATOMIC_AND", VReg_32, i32, atomic_and_global
+>;
+defm BUFFER_ATOMIC_OR : MUBUF_Atomic <
+ 0x0000003a, "BUFFER_ATOMIC_OR", VReg_32, i32, atomic_or_global
+>;
+defm BUFFER_ATOMIC_XOR : MUBUF_Atomic <
+ 0x0000003b, "BUFFER_ATOMIC_XOR", VReg_32, i32, atomic_xor_global
+>;
//def BUFFER_ATOMIC_INC : MUBUF_ <0x0000003c, "BUFFER_ATOMIC_INC", []>;
//def BUFFER_ATOMIC_DEC : MUBUF_ <0x0000003d, "BUFFER_ATOMIC_DEC", []>;
//def BUFFER_ATOMIC_FCMPSWAP : MUBUF_ <0x0000003e, "BUFFER_ATOMIC_FCMPSWAP", []>;
VOP_F32_F32_F32, AMDGPUfmax
>;
-defm V_MIN_F32 : VOP2Inst <vop2<0xf>, "V_MIN_F32", VOP_F32_F32_F32>;
-defm V_MAX_F32 : VOP2Inst <vop2<0x10>, "V_MAX_F32", VOP_F32_F32_F32>;
+defm V_MIN_F32 : VOP2Inst <vop2<0xf>, "V_MIN_F32", VOP_F32_F32_F32, fminnum>;
+defm V_MAX_F32 : VOP2Inst <vop2<0x10>, "V_MAX_F32", VOP_F32_F32_F32, fmaxnum>;
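+// fminnum/fmaxnum carry IEEE-754 minNum/maxNum semantics (a quiet NaN in
+// one operand returns the other operand), which is what v_min_f32 and
+// v_max_f32 implement.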
defm V_MIN_I32 : VOP2Inst <vop2<0x11>, "V_MIN_I32", VOP_I32_I32_I32, AMDGPUsmin>;
defm V_MAX_I32 : VOP2Inst <vop2<0x12>, "V_MAX_I32", VOP_I32_I32_I32, AMDGPUsmax>;
defm V_MIN_U32 : VOP2Inst <vop2<0x13>, "V_MIN_U32", VOP_I32_I32_I32, AMDGPUumin>;
let isCommutable = 1, Defs = [VCC] in { // Carry-out goes to VCC
// No patterns so that the scalar instructions are always selected.
// The scalar versions will be replaced with vector when needed later.
-defm V_ADD_I32 : VOP2bInst <0x00000025, "V_ADD_I32",
+defm V_ADD_I32 : VOP2bInst <vop2<0x25>, "V_ADD_I32",
VOP_I32_I32_I32, add
>;
-defm V_SUB_I32 : VOP2bInst <0x00000026, "V_SUB_I32",
+defm V_SUB_I32 : VOP2bInst <vop2<0x26>, "V_SUB_I32",
VOP_I32_I32_I32, sub
>;
-defm V_SUBREV_I32 : VOP2bInst <0x00000027, "V_SUBREV_I32",
+defm V_SUBREV_I32 : VOP2bInst <vop2<0x27>, "V_SUBREV_I32",
VOP_I32_I32_I32, null_frag, "V_SUB_I32"
>;
let Uses = [VCC] in { // Carry-in comes from VCC
-defm V_ADDC_U32 : VOP2bInst <0x00000028, "V_ADDC_U32",
+defm V_ADDC_U32 : VOP2bInst <vop2<0x28>, "V_ADDC_U32",
VOP_I32_I32_I32_VCC, adde
>;
-defm V_SUBB_U32 : VOP2bInst <0x00000029, "V_SUBB_U32",
+defm V_SUBB_U32 : VOP2bInst <vop2<0x29>, "V_SUBB_U32",
VOP_I32_I32_I32_VCC, sube
>;
-defm V_SUBBREV_U32 : VOP2bInst <0x0000002a, "V_SUBBREV_U32",
+defm V_SUBBREV_U32 : VOP2bInst <vop2<0x2a>, "V_SUBBREV_U32",
VOP_I32_I32_I32_VCC, null_frag, "V_SUBB_U32"
>;
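+// These form the 64-bit carry chain; an i64 add is lowered roughly as:
+//   v_add_i32  dst_lo, vcc, a_lo, b_lo      // carry-out written to VCC
+//   v_addc_u32 dst_hi, vcc, a_hi, b_hi, vcc // carry-in read from VCC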
// VOP3 Instructions
//===----------------------------------------------------------------------===//
-defm V_MAD_LEGACY_F32 : VOP3Inst <0x00000140, "V_MAD_LEGACY_F32",
+defm V_MAD_LEGACY_F32 : VOP3Inst <vop3<0x140>, "V_MAD_LEGACY_F32",
VOP_F32_F32_F32_F32
>;
-defm V_MAD_F32 : VOP3Inst <0x00000141, "V_MAD_F32",
+defm V_MAD_F32 : VOP3Inst <vop3<0x141>, "V_MAD_F32",
VOP_F32_F32_F32_F32, fmad
>;
-defm V_MAD_I32_I24 : VOP3Inst <0x00000142, "V_MAD_I32_I24",
+defm V_MAD_I32_I24 : VOP3Inst <vop3<0x142>, "V_MAD_I32_I24",
VOP_I32_I32_I32_I32, AMDGPUmad_i24
>;
-defm V_MAD_U32_U24 : VOP3Inst <0x00000143, "V_MAD_U32_U24",
+defm V_MAD_U32_U24 : VOP3Inst <vop3<0x143>, "V_MAD_U32_U24",
VOP_I32_I32_I32_I32, AMDGPUmad_u24
>;
-defm V_CUBEID_F32 : VOP3Inst <0x00000144, "V_CUBEID_F32",
+defm V_CUBEID_F32 : VOP3Inst <vop3<0x144>, "V_CUBEID_F32",
VOP_F32_F32_F32_F32
>;
-defm V_CUBESC_F32 : VOP3Inst <0x00000145, "V_CUBESC_F32",
+defm V_CUBESC_F32 : VOP3Inst <vop3<0x145>, "V_CUBESC_F32",
VOP_F32_F32_F32_F32
>;
-defm V_CUBETC_F32 : VOP3Inst <0x00000146, "V_CUBETC_F32",
+defm V_CUBETC_F32 : VOP3Inst <vop3<0x146>, "V_CUBETC_F32",
VOP_F32_F32_F32_F32
>;
-defm V_CUBEMA_F32 : VOP3Inst <0x00000147, "V_CUBEMA_F32",
+defm V_CUBEMA_F32 : VOP3Inst <vop3<0x147>, "V_CUBEMA_F32",
VOP_F32_F32_F32_F32
>;
-
-let neverHasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
-defm V_BFE_U32 : VOP3Inst <0x00000148, "V_BFE_U32",
+defm V_BFE_U32 : VOP3Inst <vop3<0x148>, "V_BFE_U32",
VOP_I32_I32_I32_I32, AMDGPUbfe_u32
>;
-defm V_BFE_I32 : VOP3Inst <0x00000149, "V_BFE_I32",
+defm V_BFE_I32 : VOP3Inst <vop3<0x149>, "V_BFE_I32",
VOP_I32_I32_I32_I32, AMDGPUbfe_i32
>;
-}
-
-defm V_BFI_B32 : VOP3Inst <0x0000014a, "V_BFI_B32",
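+// v_bfi_b32 computes (s0 & s1) | (~s0 & s2): s0 selects, per bit, between
+// s1 and s2.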
+defm V_BFI_B32 : VOP3Inst <vop3<0x14a>, "V_BFI_B32",
VOP_I32_I32_I32_I32, AMDGPUbfi
>;
-defm V_FMA_F32 : VOP3Inst <0x0000014b, "V_FMA_F32",
+defm V_FMA_F32 : VOP3Inst <vop3<0x14b>, "V_FMA_F32",
VOP_F32_F32_F32_F32, fma
>;
-defm V_FMA_F64 : VOP3Inst <0x0000014c, "V_FMA_F64",
+defm V_FMA_F64 : VOP3Inst <vop3<0x14c>, "V_FMA_F64",
VOP_F64_F64_F64_F64, fma
>;
//def V_LERP_U8 : VOP3_U8 <0x0000014d, "V_LERP_U8", []>;
-defm V_ALIGNBIT_B32 : VOP3Inst <0x0000014e, "V_ALIGNBIT_B32",
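+// v_alignbit_b32 returns 32 bits of {s0,s1} >> s2[4:0]; with s0 == s1 it
+// is a rotate right.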
+defm V_ALIGNBIT_B32 : VOP3Inst <vop3<0x14e>, "V_ALIGNBIT_B32",
VOP_I32_I32_I32_I32
>;
-defm V_ALIGNBYTE_B32 : VOP3Inst <0x0000014f, "V_ALIGNBYTE_B32",
+defm V_ALIGNBYTE_B32 : VOP3Inst <vop3<0x14f>, "V_ALIGNBYTE_B32",
VOP_I32_I32_I32_I32
>;
-defm V_MULLIT_F32 : VOP3Inst <0x00000150, "V_MULLIT_F32",
+defm V_MULLIT_F32 : VOP3Inst <vop3<0x150>, "V_MULLIT_F32",
VOP_F32_F32_F32_F32>;
////def V_MIN3_F32 : VOP3_MIN3 <0x00000151, "V_MIN3_F32", []>;
////def V_MIN3_I32 : VOP3_MIN3 <0x00000152, "V_MIN3_I32", []>;
//def V_SAD_U8 : VOP3_U8 <0x0000015a, "V_SAD_U8", []>;
//def V_SAD_HI_U8 : VOP3_U8 <0x0000015b, "V_SAD_HI_U8", []>;
//def V_SAD_U16 : VOP3_U16 <0x0000015c, "V_SAD_U16", []>;
-defm V_SAD_U32 : VOP3Inst <0x0000015d, "V_SAD_U32",
+defm V_SAD_U32 : VOP3Inst <vop3<0x15d>, "V_SAD_U32",
VOP_I32_I32_I32_I32
>;
////def V_CVT_PK_U8_F32 : VOP3_U8 <0x0000015e, "V_CVT_PK_U8_F32", []>;
defm V_DIV_FIXUP_F32 : VOP3Inst <
- 0x0000015f, "V_DIV_FIXUP_F32", VOP_F32_F32_F32_F32, AMDGPUdiv_fixup
+ vop3<0x15f>, "V_DIV_FIXUP_F32", VOP_F32_F32_F32_F32, AMDGPUdiv_fixup
>;
defm V_DIV_FIXUP_F64 : VOP3Inst <
- 0x00000160, "V_DIV_FIXUP_F64", VOP_F64_F64_F64_F64, AMDGPUdiv_fixup
+ vop3<0x160>, "V_DIV_FIXUP_F64", VOP_F64_F64_F64_F64, AMDGPUdiv_fixup
>;
-defm V_LSHL_B64 : VOP3Inst <0x00000161, "V_LSHL_B64",
+defm V_LSHL_B64 : VOP3Inst <vop3<0x161>, "V_LSHL_B64",
VOP_I64_I64_I32, shl
>;
-defm V_LSHR_B64 : VOP3Inst <0x00000162, "V_LSHR_B64",
+defm V_LSHR_B64 : VOP3Inst <vop3<0x162>, "V_LSHR_B64",
VOP_I64_I64_I32, srl
>;
-defm V_ASHR_I64 : VOP3Inst <0x00000163, "V_ASHR_I64",
+defm V_ASHR_I64 : VOP3Inst <vop3<0x163>, "V_ASHR_I64",
VOP_I64_I64_I32, sra
>;
let isCommutable = 1 in {
-defm V_ADD_F64 : VOP3Inst <0x00000164, "V_ADD_F64",
+defm V_ADD_F64 : VOP3Inst <vop3<0x164>, "V_ADD_F64",
VOP_F64_F64_F64, fadd
>;
-defm V_MUL_F64 : VOP3Inst <0x00000165, "V_MUL_F64",
+defm V_MUL_F64 : VOP3Inst <vop3<0x165>, "V_MUL_F64",
VOP_F64_F64_F64, fmul
>;
-defm V_MIN_F64 : VOP3Inst <0x00000166, "V_MIN_F64",
- VOP_F64_F64_F64
+
+defm V_MIN_F64 : VOP3Inst <vop3<0x166>, "V_MIN_F64",
+ VOP_F64_F64_F64, fminnum
>;
-defm V_MAX_F64 : VOP3Inst <0x00000167, "V_MAX_F64",
- VOP_F64_F64_F64
+defm V_MAX_F64 : VOP3Inst <vop3<0x167>, "V_MAX_F64",
+ VOP_F64_F64_F64, fmaxnum
>;
} // isCommutable = 1
-defm V_LDEXP_F64 : VOP3Inst <0x00000168, "V_LDEXP_F64",
+defm V_LDEXP_F64 : VOP3Inst <vop3<0x168>, "V_LDEXP_F64",
VOP_F64_F64_I32, AMDGPUldexp
>;
let isCommutable = 1 in {
-defm V_MUL_LO_U32 : VOP3Inst <0x00000169, "V_MUL_LO_U32",
+defm V_MUL_LO_U32 : VOP3Inst <vop3<0x169>, "V_MUL_LO_U32",
VOP_I32_I32_I32
>;
-defm V_MUL_HI_U32 : VOP3Inst <0x0000016a, "V_MUL_HI_U32",
+defm V_MUL_HI_U32 : VOP3Inst <vop3<0x16a>, "V_MUL_HI_U32",
VOP_I32_I32_I32
>;
-defm V_MUL_LO_I32 : VOP3Inst <0x0000016b, "V_MUL_LO_I32",
+defm V_MUL_LO_I32 : VOP3Inst <vop3<0x16b>, "V_MUL_LO_I32",
VOP_I32_I32_I32
>;
-defm V_MUL_HI_I32 : VOP3Inst <0x0000016c, "V_MUL_HI_I32",
+defm V_MUL_HI_I32 : VOP3Inst <vop3<0x16c>, "V_MUL_HI_I32",
VOP_I32_I32_I32
>;
} // isCommutable = 1
-defm V_DIV_SCALE_F32 : VOP3b_32 <0x0000016d, "V_DIV_SCALE_F32", []>;
+defm V_DIV_SCALE_F32 : VOP3b_32 <vop3<0x16d>, "V_DIV_SCALE_F32", []>;
// Double precision division pre-scale.
-defm V_DIV_SCALE_F64 : VOP3b_64 <0x0000016e, "V_DIV_SCALE_F64", []>;
+defm V_DIV_SCALE_F64 : VOP3b_64 <vop3<0x16e>, "V_DIV_SCALE_F64", []>;
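+// v_div_scale, v_div_fmas, and v_div_fixup are the pieces of the fdiv
+// expansion: pre-scale the operands, refine with fused multiply-adds, then
+// fix up the quotient.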
-defm V_DIV_FMAS_F32 : VOP3Inst <0x0000016f, "V_DIV_FMAS_F32",
+defm V_DIV_FMAS_F32 : VOP3Inst <vop3<0x16f>, "V_DIV_FMAS_F32",
VOP_F32_F32_F32_F32, AMDGPUdiv_fmas
>;
-defm V_DIV_FMAS_F64 : VOP3Inst <0x00000170, "V_DIV_FMAS_F64",
+defm V_DIV_FMAS_F64 : VOP3Inst <vop3<0x170>, "V_DIV_FMAS_F64",
VOP_F64_F64_F64_F64, AMDGPUdiv_fmas
>;
//def V_MSAD_U8 : VOP3_U8 <0x00000171, "V_MSAD_U8", []>;
//def V_QSAD_U8 : VOP3_U8 <0x00000172, "V_QSAD_U8", []>;
//def V_MQSAD_U8 : VOP3_U8 <0x00000173, "V_MQSAD_U8", []>;
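+// v_trig_preop_f64 returns a 53-bit segment of 2/pi, used for Payne-Hanek
+// style argument reduction when lowering f64 trig operations.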
defm V_TRIG_PREOP_F64 : VOP3Inst <
- 0x00000174, "V_TRIG_PREOP_F64", VOP_F64_F64_I32, AMDGPUtrig_preop
+ vop3<0x174>, "V_TRIG_PREOP_F64", VOP_F64_F64_I32, AMDGPUtrig_preop
>;
//===----------------------------------------------------------------------===//
"", []
>;
-def SI_BUFFER_RSRC : InstSI <
- (outs SReg_128:$srsrc),
- (ins SReg_32:$ptr_lo, SReg_32:$ptr_hi, SSrc_32:$data_lo, SSrc_32:$data_hi),
- "", []
->;
-
def V_SUB_F64 : InstSI <
(outs VReg_64:$dst),
(ins VReg_64:$src0, VReg_64:$src1),
def : Pat <
(i64 (ctpop i64:$src)),
- (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
- (S_BCNT1_I32_B64 $src), sub0),
- (S_MOV_B32 0), sub1)
+ (i64 (REG_SEQUENCE SReg_64,
+ (S_BCNT1_I32_B64 $src), sub0,
+ (S_MOV_B32 0), sub1))
>;
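+// REG_SEQUENCE builds the 64-bit pair in one node, which coalesces better
+// than chained INSERT_SUBREGs on an IMPLICIT_DEF.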
//===----------------------------------------------------------------------===//
// VOP2 Patterns
//===----------------------------------------------------------------------===//
-class BinOp64Pat <SDNode node, Instruction inst> : Pat <
- (node i64:$src0, i64:$src1),
- (INSERT_SUBREG (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
- (inst (EXTRACT_SUBREG i64:$src0, sub0),
- (EXTRACT_SUBREG i64:$src1, sub0)), sub0),
- (inst (EXTRACT_SUBREG i64:$src0, sub1),
- (EXTRACT_SUBREG i64:$src1, sub1)), sub1)
->;
-
-def : BinOp64Pat <and, V_AND_B32_e64>;
-def : BinOp64Pat <or, V_OR_B32_e64>;
-def : BinOp64Pat <xor, V_XOR_B32_e64>;
-
-class SextInReg <ValueType vt, int ShiftAmt> : Pat <
- (sext_inreg i32:$src0, vt),
- (V_ASHRREV_I32_e32 ShiftAmt, (V_LSHLREV_B32_e32 ShiftAmt, $src0))
->;
-
-def : SextInReg <i8, 24>;
-def : SextInReg <i16, 16>;
-
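+// v_bcnt_u32_b32 computes popcount(src0) + src1, so the add folds into the
+// instruction.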
def : Pat <
(i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
(V_BCNT_U32_B32_e64 $popcnt, $val)
>;
-def : Pat <
- (i32 (ctpop i32:$popcnt)),
- (V_BCNT_U32_B32_e64 $popcnt, 0)
->;
-
-def : Pat <
- (i64 (ctpop i64:$src)),
- (INSERT_SUBREG
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
- (V_BCNT_U32_B32_e32 (EXTRACT_SUBREG $src, sub1),
- (V_BCNT_U32_B32_e64 (EXTRACT_SUBREG $src, sub0), 0)),
- sub0),
- (V_MOV_B32_e32 0), sub1)
->;
-
-def : Pat <
- (addc i32:$src0, i32:$src1),
- (V_ADD_I32_e64 $src0, $src1)
->;
-
/********** ======================= **********/
/********** Image sampling patterns **********/
/********** ======================= **********/
def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;
-def : Pat <
- (mul i32:$src0, i32:$src1),
- (V_MUL_LO_I32 $src0, $src1)
->;
-
def : Pat <
(mulhu i32:$src0, i32:$src1),
(V_MUL_HI_U32 $src0, $src1)
multiclass MUBUFLoad_Pattern <MUBUF Instr_ADDR64, ValueType vt,
PatFrag constant_ld> {
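+// MUBUFAddr64 is a ComplexPattern that decomposes the address into the
+// $srsrc resource descriptor, 64-bit $vaddr, and immediate $offset operands
+// during selection.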
def : Pat <
- (vt (constant_ld (add i64:$ptr, i64:$offset))),
- (Instr_ADDR64 (SI_ADDR64_RSRC $ptr), $offset, 0)
+ (vt (constant_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i16:$offset))),
+ (Instr_ADDR64 $srsrc, $vaddr, $offset)
>;
-
}
defm : MUBUFLoad_Pattern <BUFFER_LOAD_SBYTE_ADDR64, i32, sextloadi8_constant>;
VOP_F64_F64, frint
>;
-defm V_QSAD_PK_U16_U8 : VOP3Inst <0x00000173, "V_QSAD_PK_U16_U8",
+defm V_QSAD_PK_U16_U8 : VOP3Inst <vop3<0x173>, "V_QSAD_PK_U16_U8",
VOP_I32_I32_I32
>;
-defm V_MQSAD_U16_U8 : VOP3Inst <0x000000172, "V_MQSAD_U16_U8",
+defm V_MQSAD_U16_U8 : VOP3Inst <vop3<0x172>, "V_MQSAD_U16_U8",
VOP_I32_I32_I32
>;
-defm V_MQSAD_U32_U8 : VOP3Inst <0x00000175, "V_MQSAD_U32_U8",
+defm V_MQSAD_U32_U8 : VOP3Inst <vop3<0x175>, "V_MQSAD_U32_U8",
VOP_I32_I32_I32
>;
-defm V_MAD_U64_U32 : VOP3Inst <0x00000176, "V_MAD_U64_U32",
+defm V_MAD_U64_U32 : VOP3Inst <vop3<0x176>, "V_MAD_U64_U32",
VOP_I64_I32_I32_I64
>;
// XXX - Does this set VCC?
-defm V_MAD_I64_I32 : VOP3Inst <0x00000177, "V_MAD_I64_I32",
+defm V_MAD_I64_I32 : VOP3Inst <vop3<0x177>, "V_MAD_I64_I32",
VOP_I64_I32_I32_I64
>;
(V_CMP_EQ_I32_e64 (V_AND_B32_e64 (i32 1), $a), 1)
>;
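+// bswap as two rotates plus a byte select: v_alignbit_b32 $a, $a, N is
+// rotr($a, N), and the 0x00ff00ff mask makes v_bfi_b32 take bytes 0 and 2
+// from rotr($a, 24) and bytes 1 and 3 from rotr($a, 8).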
+def : Pat <
+ (i32 (bswap i32:$a)),
+ (V_BFI_B32 (S_MOV_B32 0x00ff00ff),
+ (V_ALIGNBIT_B32 $a, $a, 24),
+ (V_ALIGNBIT_B32 $a, $a, 8))
+>;
+
//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//