1 //===- SystemZInstrInfo.td - SystemZ Instruction defs ---------*- tblgen-*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the SystemZ instructions in TableGen format.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // SystemZ Instruction Predicate Definitions.
// True when the subtarget is a z10 core (queried via Subtarget.isZ10());
// presumably attached to z10-only instructions with Requires<[IsZ10]> —
// the Requires lists themselves are not visible in this chunk.
16 def IsZ10 : Predicate<"Subtarget.isZ10()">;
18 include "SystemZInstrFormats.td"
20 //===----------------------------------------------------------------------===//
// Shorthand type-constraint helpers: pin DAG operand OpNum to a fixed
// integer value type.
22 //===----------------------------------------------------------------------===//
23 class SDTCisI8<int OpNum> : SDTCisVT<OpNum, i8>;
24 class SDTCisI16<int OpNum> : SDTCisVT<OpNum, i16>;
25 class SDTCisI32<int OpNum> : SDTCisVT<OpNum, i32>;
26 class SDTCisI64<int OpNum> : SDTCisVT<OpNum, i64>;
28 //===----------------------------------------------------------------------===//
// Type profiles for the SystemZ-specific SelectionDAG nodes defined below.
30 //===----------------------------------------------------------------------===//
// Call: no results, variable operand count, operand 0 is the callee pointer.
31 def SDT_SystemZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
// Call-frame setup/teardown take i64 byte amounts.
32 def SDT_SystemZCallSeqStart : SDCallSeqStart<[SDTCisI64<0>]>;
33 def SDT_SystemZCallSeqEnd : SDCallSeqEnd<[SDTCisI64<0>, SDTCisI64<1>]>;
// Compare: two like-typed operands, no explicit result (sets PSW via glue).
34 def SDT_CmpTest : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
// NOTE(review): the constraint lists of SDT_BrCond and SDT_SelectCC are
// truncated in this chunk (the lists are never closed on the visible
// lines); a trailing condition-code constraint is presumably missing —
// verify against the complete file.
35 def SDT_BrCond : SDTypeProfile<0, 2,
36 [SDTCisVT<0, OtherVT>,
38 def SDT_SelectCC : SDTypeProfile<1, 3,
39 [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
// Address wrapper: result has the same (pointer) type as its operand.
41 def SDT_Address : SDTypeProfile<1, 1,
42 [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
44 //===----------------------------------------------------------------------===//
45 // SystemZ Specific Node Definitions.
46 //===----------------------------------------------------------------------===//
// Return: chained, optionally glued to a preceding flag-producing node.
47 def SystemZretflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
48 [SDNPHasChain, SDNPOptInFlag]>;
// Call: chained, produces a flag, optionally consumes one.
49 def SystemZcall : SDNode<"SystemZISD::CALL", SDT_SystemZCall,
50 [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
51 def SystemZcallseq_start :
52 SDNode<"ISD::CALLSEQ_START", SDT_SystemZCallSeqStart,
53 [SDNPHasChain, SDNPOutFlag]>;
54 def SystemZcallseq_end :
55 SDNode<"ISD::CALLSEQ_END", SDT_SystemZCallSeqEnd,
56 [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
// Signed/unsigned compares: communicate their result via the out-flag
// (consumed by SystemZbrcond/SystemZselect below).
57 def SystemZcmp : SDNode<"SystemZISD::CMP", SDT_CmpTest, [SDNPOutFlag]>;
58 def SystemZucmp : SDNode<"SystemZISD::UCMP", SDT_CmpTest, [SDNPOutFlag]>;
59 def SystemZbrcond : SDNode<"SystemZISD::BRCOND", SDT_BrCond,
60 [SDNPHasChain, SDNPInFlag]>;
61 def SystemZselect : SDNode<"SystemZISD::SELECT", SDT_SelectCC, [SDNPInFlag]>;
// Wraps PC-relative symbol addresses (see LA64rm / larl below).
62 def SystemZpcrelwrapper : SDNode<"SystemZISD::PCRelativeWrapper", SDT_Address, []>;
65 include "SystemZOperands.td"
67 //===----------------------------------------------------------------------===//
// Call-frame setup/teardown pseudos, matched from the callseq nodes above.
70 def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt),
72 [(SystemZcallseq_start timm:$amt)]>;
73 def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
75 [(SystemZcallseq_end timm:$amt1, timm:$amt2)]>;
// Conditional-select pseudos: expanded by a custom MachineInstr inserter
// (usesCustomDAGSchedInserter); the expansion code is not in this file.
// NOTE(review): the asm strings, part of each pattern, and the closing
// brace of this let-block are missing from this chunk — verify against
// the complete file.
77 let usesCustomDAGSchedInserter = 1 in {
78 def Select32 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cc),
81 (SystemZselect GR32:$src1, GR32:$src2, imm:$cc))]>;
82 def Select64 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$cc),
85 (SystemZselect GR64:$src1, GR64:$src2, imm:$cc))]>;
89 //===----------------------------------------------------------------------===//
90 // Control Flow Instructions...
93 // FIXME: Provide proper encoding!
// Return: branch to the return address in %r14.
94 let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in {
95 def RET : Pseudo<(outs), (ins), "br\t%r14", [(SystemZretflag)]>;
// Unconditional and conditional branches. The conditional forms read the
// condition code in PSW (set by the compare instructions below); the
// condition is baked into each opcode via the SYSTEMZ_COND_* constant.
98 let isBranch = 1, isTerminator = 1 in {
100 def JMP : Pseudo<(outs), (ins brtarget:$dst), "j\t{$dst}", [(br bb:$dst)]>;
102 let Uses = [PSW] in {
103 def JE : Pseudo<(outs), (ins brtarget:$dst),
105 [(SystemZbrcond bb:$dst, SYSTEMZ_COND_E)]>;
106 def JNE : Pseudo<(outs), (ins brtarget:$dst),
108 [(SystemZbrcond bb:$dst, SYSTEMZ_COND_NE)]>;
109 def JH : Pseudo<(outs), (ins brtarget:$dst),
111 [(SystemZbrcond bb:$dst, SYSTEMZ_COND_H)]>;
112 def JL : Pseudo<(outs), (ins brtarget:$dst),
114 [(SystemZbrcond bb:$dst, SYSTEMZ_COND_L)]>;
115 def JHE : Pseudo<(outs), (ins brtarget:$dst),
117 [(SystemZbrcond bb:$dst, SYSTEMZ_COND_HE)]>;
118 def JLE : Pseudo<(outs), (ins brtarget:$dst),
120 [(SystemZbrcond bb:$dst, SYSTEMZ_COND_LE)]>;
125 //===----------------------------------------------------------------------===//
126 // Call Instructions...
130 // All calls clobber the non-callee saved registers (except R14 which we
131 // handle separately). Uses for argument registers are added manually.
132 let Defs = [R0D, R1D, R2D, R3D, R4D, R5D] in {
// Direct call to an immediate (symbol) target: branch relative and save,
// return address goes to %r14.
133 def CALLi : Pseudo<(outs), (ins i64imm:$dst, variable_ops),
134 "brasl\t%r14, $dst", [(SystemZcall imm:$dst)]>;
// Indirect call: branch to the address held in $dst, saving the return
// address in %r14. BASR ("branch and save register") is the
// register-indirect call form; BRASL takes a PC-relative immediate and
// cannot encode a register target, so it was wrong here (CALLi above is
// the immediate form that correctly uses brasl).
135 def CALLr : Pseudo<(outs), (ins ADDR64:$dst, variable_ops),
136 "basr\t%r14, $dst", [(SystemZcall ADDR64:$dst)]>;
139 //===----------------------------------------------------------------------===//
140 // Miscellaneous Instructions.
// Load-address: materialize the effective address of $src in $dst
// without touching memory; rematerializable since it has no side effects.
143 let isReMaterializable = 1 in
144 // FIXME: Provide imm12 variant
145 // FIXME: Address should be halfword aligned...
146 def LA64r : Pseudo<(outs GR64:$dst), (ins laaddr:$src),
148 [(set GR64:$dst, laaddr:$src)]>;
// PC-relative load of a global's address (larl), matched from the
// SystemZpcrelwrapper node.
149 def LA64rm : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
150 "larl\t{$dst, $src}",
152 (SystemZpcrelwrapper tglobaladdr:$src))]>;
154 let neverHasSideEffects = 1 in
155 def NOP : Pseudo<(outs), (ins), "# no-op", []>;
157 //===----------------------------------------------------------------------===//
// Register-register moves. MOV128rr / MOV64rrP move register pairs by
// emitting one move per subregister half (even/odd).
160 // FIXME: Provide proper encoding!
161 let neverHasSideEffects = 1 in {
162 def MOV32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src),
165 def MOV64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src),
168 def MOV128rr : Pseudo<(outs GR128:$dst), (ins GR128:$src),
170 "\tlgr\t${dst:subreg_odd}, ${src:subreg_odd}\n"
171 "\tlgr\t${dst:subreg_even}, ${src:subreg_even}",
173 def MOV64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src),
175 "\tlr\t${dst:subreg_odd}, ${src:subreg_odd}\n"
176 "\tlr\t${dst:subreg_even}, ${src:subreg_even}",
// 32->64-bit sign/zero extension in registers.
180 def MOVSX64rr32 : Pseudo<(outs GR64:$dst), (ins GR32:$src),
181 "lgfr\t{$dst, $src}",
182 [(set GR64:$dst, (sext GR32:$src))]>;
183 def MOVZX64rr32 : Pseudo<(outs GR64:$dst), (ins GR32:$src),
184 "llgfr\t{$dst, $src}",
185 [(set GR64:$dst, (zext GR32:$src))]>;
// Immediate materialization. The ll/lh, hl/hh variants load a 16-bit
// value into one specific 16-bit slice of the 64-bit register (selected
// by the i64XX16 predicates), zeroing the rest.
187 // FIXME: Provide proper encoding!
188 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
189 def MOV32ri16 : Pseudo<(outs GR32:$dst), (ins s16imm:$src),
191 [(set GR32:$dst, immSExt16:$src)]>;
192 def MOV64ri16 : Pseudo<(outs GR64:$dst), (ins s16imm64:$src),
193 "lghi\t{$dst, $src}",
194 [(set GR64:$dst, immSExt16:$src)]>;
196 def MOV64rill16 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
197 "llill\t{$dst, $src}",
198 [(set GR64:$dst, i64ll16:$src)]>;
199 def MOV64rilh16 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
200 "llilh\t{$dst, $src}",
201 [(set GR64:$dst, i64lh16:$src)]>;
202 def MOV64rihl16 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
203 "llihl\t{$dst, $src}",
204 [(set GR64:$dst, i64hl16:$src)]>;
205 def MOV64rihh16 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
206 "llihh\t{$dst, $src}",
207 [(set GR64:$dst, i64hh16:$src)]>;
// 32-bit immediates into a 64-bit register: sign-extended (lgfi) or
// placed in the low/high 32-bit half (llilf/llihf).
209 def MOV64ri32 : Pseudo<(outs GR64:$dst), (ins s32imm64:$src),
210 "lgfi\t{$dst, $src}",
211 [(set GR64:$dst, immSExt32:$src)]>;
212 def MOV64rilo32 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
213 "llilf\t{$dst, $src}",
214 [(set GR64:$dst, i64lo32:$src)]>;
215 def MOV64rihi32 : Pseudo<(outs GR64:$dst), (ins i64imm:$src),
216 "llihf\t{$dst, $src}",
217 [(set GR64:$dst, i64hi32:$src)]>;
// Plain loads.
// NOTE(review): mayHaveSideEffects = 1 on foldable, rematerializable
// loads looks suspicious — confirm against the complete file.
220 let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
221 def MOV32rm : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
223 [(set GR32:$dst, (load rriaddr:$src))]>;
224 def MOV64rm : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
226 [(set GR64:$dst, (load rriaddr:$src))]>;
// Plain stores.
230 def MOV32mr : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
232 [(store GR32:$src, rriaddr:$dst)]>;
233 def MOV64mr : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
235 [(store GR64:$src, rriaddr:$dst)]>;
// Store-immediate-to-memory forms. The trailing commas on MOV16mi,
// MOV32mi16 and MOV64mi16 indicate a dropped continuation line here
// (presumably a Requires<...> qualifier such as [IsZ10]) — verify
// against the complete file.
237 // FIXME: displacements here are really 12 bit, not 20!
238 def MOV8mi : Pseudo<(outs), (ins riaddr:$dst, i32i8imm:$src),
239 "mviy\t{$dst, $src}",
240 [(truncstorei8 (i32 i32immSExt8:$src), riaddr:$dst)]>;
242 def MOV16mi : Pseudo<(outs), (ins riaddr:$dst, s16imm:$src),
243 "mvhhi\t{$dst, $src}",
244 [(truncstorei16 (i32 i32immSExt16:$src), riaddr:$dst)]>,
246 def MOV32mi16 : Pseudo<(outs), (ins riaddr:$dst, s32imm:$src),
247 "mvhi\t{$dst, $src}",
248 [(store (i32 immSExt16:$src), riaddr:$dst)]>,
250 def MOV64mi16 : Pseudo<(outs), (ins riaddr:$dst, s32imm64:$src),
251 "mvghi\t{$dst, $src}",
252 [(store (i64 immSExt16:$src), riaddr:$dst)]>,
// In-register sign extension from i8/i16.
256 def MOVSX32rr8 : Pseudo<(outs GR32:$dst), (ins GR32:$src),
258 [(set GR32:$dst, (sext_inreg GR32:$src, i8))]>;
259 def MOVSX64rr8 : Pseudo<(outs GR64:$dst), (ins GR64:$src),
260 "lgbr\t{$dst, $src}",
261 [(set GR64:$dst, (sext_inreg GR64:$src, i8))]>;
262 def MOVSX32rr16 : Pseudo<(outs GR32:$dst), (ins GR32:$src),
264 [(set GR32:$dst, (sext_inreg GR32:$src, i16))]>;
265 def MOVSX64rr16 : Pseudo<(outs GR64:$dst), (ins GR64:$src),
266 "lghr\t{$dst, $src}",
267 [(set GR64:$dst, (sext_inreg GR64:$src, i16))]>;
// Sign-extending loads.
270 def MOVSX32rm8 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
272 [(set GR32:$dst, (sextloadi32i8 rriaddr:$src))]>;
273 def MOVSX32rm16 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
275 [(set GR32:$dst, (sextloadi32i16 rriaddr:$src))]>;
276 def MOVSX64rm8 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
278 [(set GR64:$dst, (sextloadi64i8 rriaddr:$src))]>;
279 def MOVSX64rm16 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
281 [(set GR64:$dst, (sextloadi64i16 rriaddr:$src))]>;
282 def MOVSX64rm32 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
284 [(set GR64:$dst, (sextloadi64i32 rriaddr:$src))]>;
// Zero-extending loads.
286 def MOVZX32rm8 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
288 [(set GR32:$dst, (zextloadi32i8 rriaddr:$src))]>;
289 def MOVZX32rm16 : Pseudo<(outs GR32:$dst), (ins rriaddr:$src),
291 [(set GR32:$dst, (zextloadi32i16 rriaddr:$src))]>;
292 def MOVZX64rm8 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
293 "llgc\t{$dst, $src}",
294 [(set GR64:$dst, (zextloadi64i8 rriaddr:$src))]>;
295 def MOVZX64rm16 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
296 "llgh\t{$dst, $src}",
297 [(set GR64:$dst, (zextloadi64i16 rriaddr:$src))]>;
298 def MOVZX64rm32 : Pseudo<(outs GR64:$dst), (ins rriaddr:$src),
299 "llgf\t{$dst, $src}",
300 [(set GR64:$dst, (zextloadi64i32 rriaddr:$src))]>;
// Truncating stores.
303 // FIXME: Implement 12-bit displacement stuff someday
304 def MOV32m8r : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
305 "stcy\t{$src, $dst}",
306 [(truncstorei8 GR32:$src, rriaddr:$dst)]>;
308 def MOV32m16r : Pseudo<(outs), (ins rriaddr:$dst, GR32:$src),
309 "sthy\t{$src, $dst}",
310 [(truncstorei16 GR32:$src, rriaddr:$dst)]>;
312 def MOV64m8r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
313 "stcy\t{$src, $dst}",
314 [(truncstorei8 GR64:$src, rriaddr:$dst)]>;
316 def MOV64m16r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
317 "sthy\t{$src, $dst}",
318 [(truncstorei16 GR64:$src, rriaddr:$dst)]>;
320 def MOV64m32r : Pseudo<(outs), (ins rriaddr:$dst, GR64:$src),
322 [(truncstorei32 GR64:$src, rriaddr:$dst)]>;
// Store-multiple / load-multiple over the register range $from..$to.
// These have no selection patterns; presumably built explicitly by the
// frame-lowering / spill code — not visible here.
324 // multiple regs moves
325 // FIXME: should we use multiple arg nodes?
326 def MOV32mrm : Pseudo<(outs), (ins riaddr:$dst, GR32:$from, GR32:$to),
327 "stmy\t{$from, $to, $dst}",
329 def MOV64mrm : Pseudo<(outs), (ins riaddr:$dst, GR64:$from, GR64:$to),
330 "stmg\t{$from, $to, $dst}",
332 def MOV32rmm : Pseudo<(outs GR32:$from, GR32:$to), (ins riaddr:$dst),
333 "lmy\t{$from, $to, $dst}",
335 def MOV64rmm : Pseudo<(outs GR64:$from, GR64:$to), (ins riaddr:$dst),
336 "lmg\t{$from, $to, $dst}",
340 //===----------------------------------------------------------------------===//
341 // Arithmetic Instructions
// Negation (load complement). All of these set the condition code, hence
// Defs = [PSW]; the truncated pattern lines presumably ended with
// (implicit PSW) — verify against the complete file.
343 let Defs = [PSW] in {
344 def NEG32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src),
346 [(set GR32:$dst, (ineg GR32:$src)),
348 def NEG64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src),
349 "lcgr\t{$dst, $src}",
350 [(set GR64:$dst, (ineg GR64:$src)),
352 def NEG64rr32 : Pseudo<(outs GR64:$dst), (ins GR32:$src),
353 "lcgfr\t{$dst, $src}",
354 [(set GR64:$dst, (ineg (sext GR32:$src))),
// Two-address arithmetic: $dst is tied to $src1, so the asm strings only
// print $dst and $src2.
358 let isTwoAddress = 1 in {
360 let Defs = [PSW] in {
362 let isCommutable = 1 in { // X = ADD Y, Z == X = ADD Z, Y
363 // FIXME: Provide proper encoding!
364 def ADD32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
366 [(set GR32:$dst, (add GR32:$src1, GR32:$src2)),
368 def ADD64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
369 "agr\t{$dst, $src2}",
370 [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
// Add-immediate: 16-bit signed (ahi/aghi) and 32-bit signed (afi/agfi).
374 // FIXME: Provide proper encoding!
375 def ADD32ri16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, s16imm:$src2),
376 "ahi\t{$dst, $src2}",
377 [(set GR32:$dst, (add GR32:$src1, immSExt16:$src2)),
379 def ADD32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, s32imm:$src2),
380 "afi\t{$dst, $src2}",
381 [(set GR32:$dst, (add GR32:$src1, imm:$src2)),
383 def ADD64ri16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s16imm64:$src2),
384 "aghi\t{$dst, $src2}",
385 [(set GR64:$dst, (add GR64:$src1, immSExt16:$src2)),
387 def ADD64ri32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
388 "agfi\t{$dst, $src2}",
389 [(set GR64:$dst, (add GR64:$src1, immSExt32:$src2)),
// Bitwise AND. Register-register forms, then immediate forms that mask a
// specific 16-bit slice (nill/nilh/nihl/nihh select the slice via the
// iXXxx16c predicates) or a 32-bit half (nilf/nihf).
392 let isCommutable = 1 in { // X = AND Y, Z == X = AND Z, Y
393 // FIXME: Provide proper encoding!
394 def AND32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
396 [(set GR32:$dst, (and GR32:$src1, GR32:$src2))]>;
397 def AND64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
398 "ngr\t{$dst, $src2}",
399 [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
402 // FIXME: Provide proper encoding!
403 // FIXME: Compute masked bits properly!
404 def AND32rill16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
405 "nill\t{$dst, $src2}",
406 [(set GR32:$dst, (and GR32:$src1, i32ll16c:$src2))]>;
407 def AND64rill16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
408 "nill\t{$dst, $src2}",
409 [(set GR64:$dst, (and GR64:$src1, i64ll16c:$src2))]>;
411 def AND32rilh16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
412 "nilh\t{$dst, $src2}",
413 [(set GR32:$dst, (and GR32:$src1, i32lh16c:$src2))]>;
414 def AND64rilh16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
415 "nilh\t{$dst, $src2}",
416 [(set GR64:$dst, (and GR64:$src1, i64lh16c:$src2))]>;
418 def AND64rihl16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
419 "nihl\t{$dst, $src2}",
420 [(set GR64:$dst, (and GR64:$src1, i64hl16c:$src2))]>;
421 def AND64rihh16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
422 "nihh\t{$dst, $src2}",
423 [(set GR64:$dst, (and GR64:$src1, i64hh16c:$src2))]>;
425 def AND32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
426 "nilf\t{$dst, $src2}",
427 [(set GR32:$dst, (and GR32:$src1, imm:$src2))]>;
428 def AND64rilo32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
429 "nilf\t{$dst, $src2}",
430 [(set GR64:$dst, (and GR64:$src1, i64lo32c:$src2))]>;
431 def AND64rihi32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
432 "nihf\t{$dst, $src2}",
433 [(set GR64:$dst, (and GR64:$src1, i64hi32c:$src2))]>;
// Bitwise OR, same slice-based immediate scheme (oill/oilh/oihl/oihh and
// oilf/oihf), using the non-complemented iXXxx16 predicates.
435 let isCommutable = 1 in { // X = OR Y, Z == X = OR Z, Y
436 // FIXME: Provide proper encoding!
437 def OR32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
439 [(set GR32:$dst, (or GR32:$src1, GR32:$src2))]>;
440 def OR64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
441 "ogr\t{$dst, $src2}",
442 [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
445 def OR32ri16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
446 "oill\t{$dst, $src2}",
447 [(set GR32:$dst, (or GR32:$src1, i32ll16:$src2))]>;
448 def OR32ri16h : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
449 "oilh\t{$dst, $src2}",
450 [(set GR32:$dst, (or GR32:$src1, i32lh16:$src2))]>;
451 def OR32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
452 "oilf\t{$dst, $src2}",
453 [(set GR32:$dst, (or GR32:$src1, imm:$src2))]>;
455 def OR64rill16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
456 "oill\t{$dst, $src2}",
457 [(set GR64:$dst, (or GR64:$src1, i64ll16:$src2))]>;
458 def OR64rilh16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
459 "oilh\t{$dst, $src2}",
460 [(set GR64:$dst, (or GR64:$src1, i64lh16:$src2))]>;
461 def OR64rihl16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
462 "oihl\t{$dst, $src2}",
463 [(set GR64:$dst, (or GR64:$src1, i64hl16:$src2))]>;
464 def OR64rihh16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
465 "oihh\t{$dst, $src2}",
466 [(set GR64:$dst, (or GR64:$src1, i64hh16:$src2))]>;
468 def OR64rilo32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
469 "oilf\t{$dst, $src2}",
470 [(set GR64:$dst, (or GR64:$src1, i64lo32:$src2))]>;
471 def OR64rihi32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, i64imm:$src2),
472 "oihf\t{$dst, $src2}",
473 [(set GR64:$dst, (or GR64:$src1, i64hi32:$src2))]>;
// Subtraction (not commutable) and bitwise XOR.
475 // FIXME: Provide proper encoding!
476 def SUB32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
478 [(set GR32:$dst, (sub GR32:$src1, GR32:$src2))]>;
479 def SUB64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
480 "sgr\t{$dst, $src2}",
481 [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
484 let isCommutable = 1 in { // X = XOR Y, Z == X = XOR Z, Y
485 // FIXME: Provide proper encoding!
486 def XOR32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
488 [(set GR32:$dst, (xor GR32:$src1, GR32:$src2))]>;
489 def XOR64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
490 "xgr\t{$dst, $src2}",
491 [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
494 def XOR32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
495 "xilf\t{$dst, $src2}",
496 [(set GR32:$dst, (xor GR32:$src1, imm:$src2))]>;
// Multiplication. The *rrP / *128rrP forms operate on even/odd register
// pairs (GR64P/GR128) and have no selection patterns here; they are
// reached via the mulhs/mulhu and div/rem Pat<> rules at the end of the
// file, which marshal operands into the pair's odd subregister.
500 let isCommutable = 1 in { // X = MUL Y, Z == X = MUL Z, Y
501 def MUL32rr : Pseudo<(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
502 "msr\t{$dst, $src2}",
503 [(set GR32:$dst, (mul GR32:$src1, GR32:$src2))]>;
504 def MUL64rr : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
505 "msgr\t{$dst, $src2}",
506 [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>;
508 def MUL64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
511 def UMUL64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
512 "mlr\t{$dst, $src2}",
514 def UMUL128rrP : Pseudo<(outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
515 "mlgr\t{$dst, $src2}",
// Multiply by immediate: 16-bit signed (mhi/mghi), 32-bit (msfi/msgfi).
// The trailing commas on MUL32ri/MUL64ri32 indicate a dropped
// continuation line (presumably Requires<[IsZ10]>) — verify against the
// complete file.
520 def MUL32ri16 : Pseudo<(outs GR32:$dst), (ins GR32:$src1, s16imm:$src2),
521 "mhi\t{$dst, $src2}",
522 [(set GR32:$dst, (mul GR32:$src1, i32immSExt16:$src2))]>;
523 def MUL64ri16 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s16imm64:$src2),
524 "mghi\t{$dst, $src2}",
525 [(set GR64:$dst, (mul GR64:$src1, immSExt16:$src2))]>;
527 def MUL32ri : Pseudo<(outs GR32:$dst), (ins GR32:$src1, s32imm:$src2),
528 "msfi\t{$dst, $src2}",
529 [(set GR32:$dst, (mul GR32:$src1, imm:$src2))]>,
531 def MUL64ri32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, s32imm64:$src2),
532 "msgfi\t{$dst, $src2}",
533 [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>,
// Multiply with a memory operand, and 64 x sext(32) multiply.
536 def MUL32rm : Pseudo<(outs GR32:$dst), (ins GR32:$src1, rriaddr:$src2),
537 "msy\t{$dst, $src2}",
538 [(set GR32:$dst, (mul GR32:$src1, (load rriaddr:$src2)))]>;
539 def MUL64rm : Pseudo<(outs GR64:$dst), (ins GR64:$src1, rriaddr:$src2),
540 "msg\t{$dst, $src2}",
541 [(set GR64:$dst, (mul GR64:$src1, (load rriaddr:$src2)))]>;
543 def MULSX64rr32 : Pseudo<(outs GR64:$dst), (ins GR64:$src1, GR32:$src2),
544 "msgfr\t{$dst, $src2}",
545 [(set GR64:$dst, (mul GR64:$src1, (sext GR32:$src2)))]>;
// Combined divide/remainder on register pairs; selected via the
// sdiv/udiv/srem/urem Pat<> rules at the end of the file.
547 def SDIVREM64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
551 def SDIVREM128rrP : Pseudo<(outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
552 "dsgr\t{$dst, $src2}",
555 def UDIVREM64rrP : Pseudo<(outs GR64P:$dst), (ins GR64P:$src1, GR32:$src2),
556 "dlr\t{$dst, $src2}",
559 def UDIVREM128rrP : Pseudo<(outs GR128:$dst), (ins GR128:$src1, GR64:$src2),
560 "dlgr\t{$dst, $src2}",
563 } // isTwoAddress = 1
565 //===----------------------------------------------------------------------===//
// Shifts. The 32-bit forms are two-address; the 64-bit forms take a
// separate source. Variable shift amounts come in as an riaddr-style
// base+displacement operand (truncated to i32 in the 64-bit patterns);
// the *ri forms handle constant amounts.
568 let isTwoAddress = 1 in
569 def SRL32rri : Pseudo<(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
571 [(set GR32:$dst, (srl GR32:$src, riaddr32:$amt))]>;
572 def SRL64rri : Pseudo<(outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
573 "srlg\t{$dst, $src, $amt}",
574 [(set GR64:$dst, (srl GR64:$src, (i32 (trunc riaddr:$amt))))]>;
575 def SRLA64ri : Pseudo<(outs GR64:$dst), (ins GR64:$src, i32imm:$amt),
576 "srlg\t{$dst, $src, $amt}",
577 [(set GR64:$dst, (srl GR64:$src, (i32 imm:$amt)))]>;
579 let isTwoAddress = 1 in
580 def SHL32rri : Pseudo<(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
582 [(set GR32:$dst, (shl GR32:$src, riaddr32:$amt))]>;
583 def SHL64rri : Pseudo<(outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
584 "sllg\t{$dst, $src, $amt}",
585 [(set GR64:$dst, (shl GR64:$src, (i32 (trunc riaddr:$amt))))]>;
586 def SHL64ri : Pseudo<(outs GR64:$dst), (ins GR64:$src, i32imm:$amt),
587 "sllg\t{$dst, $src, $amt}",
588 [(set GR64:$dst, (shl GR64:$src, (i32 imm:$amt)))]>;
// Arithmetic right shifts additionally set the condition code (PSW).
591 let Defs = [PSW] in {
592 let isTwoAddress = 1 in
593 def SRA32rri : Pseudo<(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
595 [(set GR32:$dst, (sra GR32:$src, riaddr32:$amt)),
597 def SRA64rri : Pseudo<(outs GR64:$dst), (ins GR64:$src, riaddr:$amt),
598 "srag\t{$dst, $src, $amt}",
599 [(set GR64:$dst, (sra GR64:$src, (i32 (trunc riaddr:$amt)))),
601 def SRA64ri : Pseudo<(outs GR64:$dst), (ins GR64:$src, i32imm:$amt),
602 "srag\t{$dst, $src, $amt}",
603 [(set GR64:$dst, (sra GR64:$src, (i32 imm:$amt))),
607 //===----------------------------------------------------------------------===//
608 // Test instructions (like AND but do not produce any result
// Integer comparisons: no register results, only the condition code in
// PSW (consumed by the J* branches and Select* pseudos above). CMP* are
// signed (SystemZcmp), UCMP* are unsigned (SystemZucmp).
610 // Integer comparisons
611 let Defs = [PSW] in {
612 def CMP32rr : Pseudo<(outs), (ins GR32:$src1, GR32:$src2),
614 [(SystemZcmp GR32:$src1, GR32:$src2), (implicit PSW)]>;
615 def CMP64rr : Pseudo<(outs), (ins GR64:$src1, GR64:$src2),
617 [(SystemZcmp GR64:$src1, GR64:$src2), (implicit PSW)]>;
619 def CMP32ri : Pseudo<(outs), (ins GR32:$src1, s32imm:$src2),
621 [(SystemZcmp GR32:$src1, imm:$src2), (implicit PSW)]>;
622 def CMP64ri32 : Pseudo<(outs), (ins GR64:$src1, s32imm64:$src2),
623 "cgfi\t$src1, $src2",
624 [(SystemZcmp GR64:$src1, i64immSExt32:$src2),
627 def CMP32rm : Pseudo<(outs), (ins GR32:$src1, rriaddr:$src2),
629 [(SystemZcmp GR32:$src1, (load rriaddr:$src2)),
631 def CMP64rm : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
633 [(SystemZcmp GR64:$src1, (load rriaddr:$src2)),
636 def UCMP32rr : Pseudo<(outs), (ins GR32:$src1, GR32:$src2),
638 [(SystemZucmp GR32:$src1, GR32:$src2), (implicit PSW)]>;
639 def UCMP64rr : Pseudo<(outs), (ins GR64:$src1, GR64:$src2),
640 "clgr\t$src1, $src2",
641 [(SystemZucmp GR64:$src1, GR64:$src2), (implicit PSW)]>;
643 def UCMP32ri : Pseudo<(outs), (ins GR32:$src1, i32imm:$src2),
644 "clfi\t$src1, $src2",
645 [(SystemZucmp GR32:$src1, imm:$src2), (implicit PSW)]>;
646 def UCMP64ri32 : Pseudo<(outs), (ins GR64:$src1, i64i32imm:$src2),
647 "clgfi\t$src1, $src2",
648 [(SystemZucmp GR64:$src1, i64immZExt32:$src2),
651 def UCMP32rm : Pseudo<(outs), (ins GR32:$src1, rriaddr:$src2),
653 [(SystemZucmp GR32:$src1, (load rriaddr:$src2)),
655 def UCMP64rm : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
657 [(SystemZucmp GR64:$src1, (load rriaddr:$src2)),
// 64-bit vs extended-32-bit compares.
// NOTE(review): CMPSX64rr32/CMPSX64rm32 pair the signed-compare
// mnemonics cgfr/cgf with the *unsigned* SystemZucmp node while feeding
// it a sign-extended operand; that combination looks inconsistent with
// the CMP*/UCMP* split above — confirm whether SystemZcmp was intended.
660 def CMPSX64rr32 : Pseudo<(outs), (ins GR64:$src1, GR32:$src2),
661 "cgfr\t$src1, $src2",
662 [(SystemZucmp GR64:$src1, (sext GR32:$src2)),
664 def UCMPZX64rr32 : Pseudo<(outs), (ins GR64:$src1, GR32:$src2),
665 "clgfr\t$src1, $src2",
666 [(SystemZucmp GR64:$src1, (zext GR32:$src2)),
669 def CMPSX64rm32 : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
671 [(SystemZucmp GR64:$src1, (sextloadi64i32 rriaddr:$src2)),
673 def UCMPZX64rm32 : Pseudo<(outs), (ins GR64:$src1, rriaddr:$src2),
674 "clgf\t$src1, $src2",
675 [(SystemZucmp GR64:$src1, (zextloadi64i32 rriaddr:$src2)),
678 // FIXME: Add other crazy ucmp forms
682 //===----------------------------------------------------------------------===//
683 // Non-Instruction Patterns.
684 //===----------------------------------------------------------------------===//
// anyext i32 -> i64: drop the value into the 32-bit subregister of an
// undefined 64-bit register (upper bits unspecified, as anyext allows).
687 def : Pat<(i64 (anyext GR32:$src)),
688 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_32bit)>;
690 //===----------------------------------------------------------------------===//
692 //===----------------------------------------------------------------------===//
694 // FIXME: use add/sub tricks with 32768/-32768
// Truncation is just a subregister extraction.
697 def : Pat<(i32 (trunc GR64:$src)),
698 (EXTRACT_SUBREG GR64:$src, subreg_32bit)>;
700 // sext_inreg patterns
701 def : Pat<(sext_inreg GR64:$src, i32),
702 (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, subreg_32bit))>;
// Plain extending loads: use the zero-extending load instructions.
705 def : Pat<(extloadi32i8 rriaddr:$src), (MOVZX32rm8 rriaddr:$src)>;
706 def : Pat<(extloadi32i16 rriaddr:$src), (MOVZX32rm16 rriaddr:$src)>;
707 def : Pat<(extloadi64i8 rriaddr:$src), (MOVZX64rm8 rriaddr:$src)>;
708 def : Pat<(extloadi64i16 rriaddr:$src), (MOVZX64rm16 rriaddr:$src)>;
709 def : Pat<(extloadi64i32 rriaddr:$src), (MOVZX64rm32 rriaddr:$src)>;
// Direct calls to globals/external symbols use the immediate call form.
712 def : Pat<(SystemZcall (i64 tglobaladdr:$dst)),
713 (CALLi tglobaladdr:$dst)>;
714 def : Pat<(SystemZcall (i64 texternalsym:$dst)),
715 (CALLi texternalsym:$dst)>;
// mulh: place one operand in the odd half of an undefined register pair,
// do the widening multiply, then extract the high half. (The trailing
// EXTRACT_SUBREG arguments of several of these Pat<> results are missing
// from this chunk — verify against the complete file.)
718 def : Pat<(mulhs GR32:$src1, GR32:$src2),
719 (EXTRACT_SUBREG (MUL64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
720 GR32:$src1, subreg_odd),
724 def : Pat<(mulhu GR32:$src1, GR32:$src2),
725 (EXTRACT_SUBREG (UMUL64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
726 GR32:$src1, subreg_odd),
729 def : Pat<(mulhu GR64:$src1, GR64:$src2),
730 (EXTRACT_SUBREG (UMUL128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
731 GR64:$src1, subreg_odd),
// div/rem: route through the combined divide/remainder pair instructions
// and pick out the quotient (div) or remainder (rem) subregister.
736 // FIXME: Add memory versions
737 def : Pat<(sdiv GR32:$src1, GR32:$src2),
738 (EXTRACT_SUBREG (SDIVREM64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
739 GR32:$src1, subreg_odd),
742 def : Pat<(sdiv GR64:$src1, GR64:$src2),
743 (EXTRACT_SUBREG (SDIVREM128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
744 GR64:$src1, subreg_odd),
747 def : Pat<(udiv GR32:$src1, GR32:$src2),
748 (EXTRACT_SUBREG (UDIVREM64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
749 GR32:$src1, subreg_odd),
752 def : Pat<(udiv GR64:$src1, GR64:$src2),
753 (EXTRACT_SUBREG (UDIVREM128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
754 GR64:$src1, subreg_odd),
759 // FIXME: Add memory versions
760 def : Pat<(srem GR32:$src1, GR32:$src2),
761 (EXTRACT_SUBREG (SDIVREM64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
762 GR32:$src1, subreg_odd),
765 def : Pat<(srem GR64:$src1, GR64:$src2),
766 (EXTRACT_SUBREG (SDIVREM128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
767 GR64:$src1, subreg_odd),
770 def : Pat<(urem GR32:$src1, GR32:$src2),
771 (EXTRACT_SUBREG (UDIVREM64rrP (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
772 GR32:$src1, subreg_odd),
775 def : Pat<(urem GR64:$src1, GR64:$src2),
776 (EXTRACT_SUBREG (UDIVREM128rrP (INSERT_SUBREG (i128 (IMPLICIT_DEF)),
777 GR64:$src1, subreg_odd),
// Arbitrary i32 immediate: materialize as a 64-bit immediate, then take
// the low 32-bit subregister.
781 def : Pat<(i32 imm:$src),
782 (EXTRACT_SUBREG (MOV64ri32 (i64 imm:$src)), subreg_32bit)>;