//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//
class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}
class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;

}
def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}
//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget : Operand<OtherVT>;
//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//
def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_UNE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;
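
// Note that each ordered leaf above also accepts the corresponding
// NaN-agnostic condition code (e.g. COND_OEQ matches plain SETEQ as well as
// SETOEQ): when the DAG does not pin down the unordered behavior, selecting
// the ordered hardware compare is a legal choice.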
def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned comparisons
//===----------------------------------------------------------------------===//

def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//
def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;
//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//
def global_store : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;
// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;
// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;
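
// The "az" prefix below means any-extend or zero-extend: the fragment
// matches both ISD::EXTLOAD and ISD::ZEXTLOAD, since a zero-extending load
// is a valid implementation of an any-extending one.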
def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;
def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;
def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                   (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                          (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;
class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;
def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;
def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;
def atomic_cmp_swap_32_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i32 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;
def atomic_cmp_swap_64_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i64 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;
class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
int FP32_NEG_ONE = 0xbf800000;
int FP32_ONE = 0x3f800000;
}
def CONST : Constants;
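
// For reference, the decoded IEEE-754 single-precision values of the
// constants above (reader-verifiable; not part of the original comments):
//   0x40c90fdb = 6.2831853...   (2 * pi)
//   0x40490fdb = 3.1415927...   (pi)
//   0x3e22f983 = 0.15915494...  (1 / (2 * pi))
//   0x4f800000 = 4294967296.0   (2^32 as a float)
//   0xbf800000 = -1.0
//   0x3f800000 =  1.0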
def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;
let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1
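
// The pseudos below give SelectionDAG something to match the
// AMDGPUregister_load/AMDGPUregister_store nodes against; being isPseudo,
// they must be rewritten into real indirect-addressing sequences by a later
// target pass. The $chan immediate presumably carries the channel
// (sub-register) index on targets with multi-channel registers.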
multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                    ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1
/* Generic helper patterns for intrinsics */
/* -------------------------------------- */
class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;
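
// The rewrite relies on the identity pow(x, y) == exp2(y * log2(x)), which
// is exact only for x > 0, assuming log_ieee/exp_ieee are the target's
// base-2 log and exp instructions, as their pairing here implies.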
/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;
/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;
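
// Hypothetical instantiations, for illustration only (the real ones live in
// the per-target .td files): extracting and inserting lane 1 of a v2f32
// held in a 64-bit register pair would look like
//
//   def : Extract_Element <f32, v2f32, 1, sub1>;
//   def : Insert_Element <f32, v2f32, 1, sub1>;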
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;
// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT, Instruction LoadImm32> {

  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;
  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;
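
  // Why this is a BFI: per bit, z ^ (x & (y ^ z)) selects y where x is set
  // and z where x is clear, i.e. (x & y) | (~x & z) -- the same bitfield
  // insert as the ISA-doc form above, with x as the mask.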
  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;
  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0),
      (BFI_INT (LoadImm32 0x7fffffff),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}
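
// In the f64 copysign pattern just above, only the high dword (sub1) holds
// the sign bit, so the BFI with mask 0x7fffffff is applied to the high
// halves alone while the low dword of $src0 passes through unchanged.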
// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;
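
// Why the rewrite holds: Ma is a per-bit majority vote. Where x and y
// agree, x ^ y is 0 and the BFI selects y (already the majority); where
// they differ, x ^ y is 1 and the BFI selects z, the deciding vote.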
// Bitfield extract patterns

/*

XXX: The BFE pattern is not working correctly because the XForm is not being
applied.

def legalshift32 : ImmLeaf <i32, [{return Imm >=0 && Imm < 32;}]>;
def bfemask : PatLeaf <(imm), [{return isMask_32(N->getZExtValue());}],
                            SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(CountTrailingOnes_32(N->getZExtValue()), MVT::i32);}]>>;

class BFEPattern <Instruction BFE> : Pat <
  (and (srl i32:$x, legalshift32:$y), bfemask:$z),
  (BFE $x, $y, $z)
>;

*/
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;
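
// BIT_ALIGN is expected to be a funnel shift: it shifts the 64-bit
// concatenation of its first two operands right by the third operand and
// keeps the low 32 bits. With both halves equal to $src0, that is exactly
// a rotate right.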
// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/
class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;
multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}
multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}
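
// A target without native 24-bit multiply/multiply-add can instantiate the
// Expand* multiclasses above to lower the nodes with full 32-bit mul/add.
// That is sound because the mul_i24/mul_u24 nodes are only formed when the
// operands are known to fit in 24 bits, so the low 32 bits of the product
// come out the same either way.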
class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;
multiclass RsqPat<Instruction RsqInst, ValueType vt> {
  def : Pat <
    (fdiv FP_ONE, (fsqrt vt:$src)),
    (RsqInst $src)
  >;

  def : Pat <
    (AMDGPUrcp (fsqrt vt:$src)),
    (RsqInst $src)
  >;
}
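
// These fold reciprocal and reciprocal-square-root idioms into single
// hardware ops. Both change rounding relative to the expanded form, so
// instantiations are expected to be guarded by predicates such as the
// UnsafeFPMath predicate defined near the top of this file.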
include "R600Instructions.td"
include "R700Instructions.td"
include "EvergreenInstructions.td"
include "CaymanInstructions.td"

include "SIInstrInfo.td"