//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                       AssemblerPredicate<"HasV8_1aOps", "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                       AssemblerPredicate<"HasV8_2aOps", "armv8.2a">;
def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                       AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                       AssemblerPredicate<"FeatureNEON", "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                       AssemblerPredicate<"FeatureCrypto", "crypto">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                       AssemblerPredicate<"FeatureCRC", "crc">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                       AssemblerPredicate<"FeatureFullFP16", "fullfp16">;

def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsCyclone        : Predicate<"Subtarget->isCyclone()">;

//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//
// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                               [SDTCisSameAs<0, 2>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                              [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisInt<0>,
                                               SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>,
                                                  SDTCisVT<1, i32>,
                                                  SDTCisVT<4, i32>]>;

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                       [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                        SDTCisVT<2, i32>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, OtherVT>]>;

def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                     [SDTCisSameAs<0, 1>,
                                      SDTCisSameAs<0, 2>,
                                      SDTCisInt<3>,
                                      SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                      [SDTCisFP<0>,
                                       SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                            SDTCisSameAs<0, 1>,
                                            SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                            SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>,
                                             SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                   SDTCisPtrTy<1>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1
//
// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
// number of operands (the variable)
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
                                              [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                            [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                             SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                             SDTCisSameAs<1, 4>]>;

// Node definitions.
def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                  SDCallSeqStart<[ SDTCisVT<0, i32> ]>,
                                  [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                  SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                  [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                  SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                  [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                   SDNPVariadic]>;
def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                  [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                  [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                  [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                  [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                  [SDNPHasChain]>;

def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                              [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                              [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp      : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;

def AArch64not: SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>;
def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsl: SDNode<"AArch64ISD::BSL", SDT_AArch64trivec>;

def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                           (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>;

def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                         [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                             [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;

def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                           SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
def HasZCZ    : Predicate<"Subtarget->hasZeroCycleZeroing()">;
def NoZCZ     : Predicate<"!Subtarget->hasZeroCycleZeroing()">;
def IsDarwin  : Predicate<"Subtarget->isTargetDarwin()">;
def IsNotDarwin: Predicate<"!Subtarget->isTargetDarwin()">;
def ForCodeSize   : Predicate<"ForCodeSize">;
def NotForCodeSize   : Predicate<"!ForCodeSize">;

include "AArch64InstrFormats.td"

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Miscellaneous instructions.
//===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
                              [(AArch64callseq_start timm:$amt)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions. When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
                     [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                              tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                              tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                              tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                              tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                              texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;
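
// For example (illustrative, not in the original source): int_aarch64_dmb
// with CRm = 11 (0b1011) corresponds to "dmb ish", an inner-shareable full
// barrier.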

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;
}

def : InstAlias<"clrex", (CLREX 0xf)>;
def : InstAlias<"isb", (ISB 0xf)>;

def MRS    : MRSI;
def MSR    : MSRI;
def MSRpstateImm1 : MSRpstateImm0_1;
def MSRpstateImm4 : MSRpstateImm0_15;

// The thread pointer (on Linux, at least, where this has been implemented) is
// TPIDR_EL0.
def : Pat<(AArch64threadpointer), (MRS 0xde82)>;

// The cycle counter PMC register is PMCCNTR_EL0.
let Predicates = [HasPerfMon] in
def : Pat<(readcyclecounter), (MRS 0xdce8)>;

// Generic system instructions
def SYSxt  : SystemXtI<0, "sys">;
def SYSLxt : SystemLXtI<1, "sysl">;

def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
                 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;

//===----------------------------------------------------------------------===//
// Move immediate instructions.
//===----------------------------------------------------------------------===//

defm MOVK : InsertImmediate<0b11, "movk">;
defm MOVN : MoveImmediate<0b00, "movn">;

let PostEncoderMethod = "fixMOVZ" in
defm MOVZ : MoveImmediate<0b10, "movz">;

// First group of aliases covers an implicit "lsl #0".
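// For instance, "movk w0, #1" is accepted here and means
// "movk w0, #1, lsl #0".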
def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, imm0_65535:$imm, 0)>;

// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48)>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32)>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16)>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0)>;

def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16)>;
def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0)>;

// Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
                          int width, int shift> {
  def _asmoperand : AsmOperandClass {
    let Name = basename # width # "_lsl" # shift # "MovAlias";
    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
                               # shift # ">";
    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
  }

  def _movimm : Operand<i32> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
  }

  def : InstAlias<"mov $Rd, $imm",
                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
}

defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;

defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;

defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;

let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
    isAsCheapAsAMove = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions. When that changes, we can select
// directly to the real instructions and get rid of these pseudos.

def MOVi32imm
    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
             [(set GPR32:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
def MOVi64imm
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
             [(set GPR64:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
} // isReMaterializable, isCodeGenOnly

// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
// eventual expansion code fewer bits to worry about getting right. Marshalling
// the types is a little tricky though:
def i64imm_32bit : ImmLeaf<i64, [{
  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;

def trunc_imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def : Pat<(i64 i64imm_32bit:$src),
          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
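
// Illustrative example (assumed, not from the original source): the 64-bit
// constant 0x12345678 is materialized as
//   MOVi32imm  wN, #0x12345678    // later expanded to movz/movk on wN
//   SUBREG_TO_REG xN, wN, sub_32  // upper 32 bits are implicitly zero
// instead of a longer movz/movk chain on the full x-register.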

// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;

def : Pat<(f32 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
def : Pat<(f64 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;

// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
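// Illustrative expansion for a symbol "sym" (relocation syntax assumed from
// the ELF ABI):
//   movz x0, #:abs_g3:sym       // bits [63:48]
//   movk x0, #:abs_g2_nc:sym    // bits [47:32]
//   movk x0, #:abs_g1_nc:sym    // bits [31:16]
//   movk x0, #:abs_g0_nc:sym    // bits [15:0]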
def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
                               tglobaladdr:$g1, tglobaladdr:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g3, 48),
                                  tglobaladdr:$g2, 32),
                          tglobaladdr:$g1, 16),
                  tglobaladdr:$g0, 0)>;

def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
                               tblockaddress:$g1, tblockaddress:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g3, 48),
                                  tblockaddress:$g2, 32),
                          tblockaddress:$g1, 16),
                  tblockaddress:$g0, 0)>;

def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
                               tconstpool:$g1, tconstpool:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g3, 48),
                                  tconstpool:$g2, 32),
                          tconstpool:$g1, 16),
                  tconstpool:$g0, 0)>;

def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
                               tjumptable:$g1, tjumptable:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g3, 48),
                                  tjumptable:$g2, 32),
                          tjumptable:$g1, 16),
                  tjumptable:$g0, 0)>;

//===----------------------------------------------------------------------===//
// Arithmetic instructions.
//===----------------------------------------------------------------------===//

// Add/subtract with carry.
defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;

def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;

// Add/subtract
defm ADD : AddSub<0, "add", "sub", add>;
defm SUB : AddSub<1, "sub", "add">;

def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;

defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;

// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
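// e.g. when both "a - b" and a comparison of a and b are needed, selecting
// SUBS for the subtraction lets the two share one instruction.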
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
let AddedComplexity = 1 in {
def : Pat<(sub GPR32sp:$R2, arith_extended_reg32<i32>:$R3),
          (SUBSWrx GPR32sp:$R2, arith_extended_reg32<i32>:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3)>;
}

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
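// For example, "add w0, w1, #-1" has no valid encoding, but the equivalent
// "sub w0, w1, #1" does; the patterns below (and their flag-setting
// counterparts further down) perform that rewrite.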
let AddedComplexity = 1 in {
def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"neg $dst, $src$shift",
                (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;

def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
def : InstAlias<"negs $dst, $src$shift",
                (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;

// Unsigned/Signed divide
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;
let isCodeGenOnly = 1 in {
defm UDIV_Int : Div<0, "udiv", int_aarch64_udiv>;
defm SDIV_Int : Div<1, "sdiv", int_aarch64_sdiv>;
}

// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
defm LSLV : Shift<0b00, "lsl", shl>;
defm LSRV : Shift<0b01, "lsr", srl>;
defm RORV : Shift<0b11, "ror", rotr>;

def : ShiftAlias<"asrv", ASRVWr, GPR32>;
def : ShiftAlias<"asrv", ASRVXr, GPR64>;
def : ShiftAlias<"lslv", LSLVWr, GPR32>;
def : ShiftAlias<"lslv", LSLVXr, GPR64>;
def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
def : ShiftAlias<"rorv", RORVWr, GPR32>;
def : ShiftAlias<"rorv", RORVXr, GPR64>;

// Multiply-add
let AddedComplexity = 7 in {
defm MADD : MulAccum<0, "madd", add>;
defm MSUB : MulAccum<1, "msub", sub>;

def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;

def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
} // AddedComplexity = 7

let AddedComplexity = 5 in {
def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;

def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
          (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
          (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;

def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
          (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
          (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
} // AddedComplexity = 5

def : MulAccumWAlias<"mul", MADDWrrr>;
def : MulAccumXAlias<"mul", MADDXrrr>;
def : MulAccumWAlias<"mneg", MSUBWrrr>;
def : MulAccumXAlias<"mneg", MSUBXrrr>;
def : WideMulAccumAlias<"smull", SMADDLrrr>;
def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
def : WideMulAccumAlias<"umull", UMADDLrrr>;
def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;

// Multiply-high
def SMULHrr : MulHi<0b010, "smulh", mulhs>;
def UMULHrr : MulHi<0b110, "umulh", mulhu>;

// CRC32
def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;

def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;

// v8.1 atomic CAS
defm CAS   : CompareAndSwap<0, 0, "">;
defm CASA  : CompareAndSwap<1, 0, "a">;
defm CASL  : CompareAndSwap<0, 1, "l">;
defm CASAL : CompareAndSwap<1, 1, "al">;

// v8.1 atomic CASP
defm CASP   : CompareAndSwapPair<0, 0, "">;
defm CASPA  : CompareAndSwapPair<1, 0, "a">;
defm CASPL  : CompareAndSwapPair<0, 1, "l">;
defm CASPAL : CompareAndSwapPair<1, 1, "al">;

// v8.1 atomic SWP
defm SWP   : Swap<0, 0, "">;
defm SWPA  : Swap<1, 0, "a">;
defm SWPL  : Swap<0, 1, "l">;
defm SWPAL : Swap<1, 1, "al">;

// v8.1 atomic LD<OP>(register): performs a load, applies <OP>, and stores the
// result back, returning the original value.
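// For example (illustrative): "ldadd w0, w1, [x2]" atomically loads the word
// at [x2] into w1 and stores (old value + w0) back to [x2].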
defm LDADD   : LDOPregister<0b000, "add", 0, 0, "">;
defm LDADDA  : LDOPregister<0b000, "add", 1, 0, "a">;
defm LDADDL  : LDOPregister<0b000, "add", 0, 1, "l">;
defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;

defm LDCLR   : LDOPregister<0b001, "clr", 0, 0, "">;
defm LDCLRA  : LDOPregister<0b001, "clr", 1, 0, "a">;
defm LDCLRL  : LDOPregister<0b001, "clr", 0, 1, "l">;
defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;

defm LDEOR   : LDOPregister<0b010, "eor", 0, 0, "">;
defm LDEORA  : LDOPregister<0b010, "eor", 1, 0, "a">;
defm LDEORL  : LDOPregister<0b010, "eor", 0, 1, "l">;
defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;

defm LDSET   : LDOPregister<0b011, "set", 0, 0, "">;
defm LDSETA  : LDOPregister<0b011, "set", 1, 0, "a">;
defm LDSETL  : LDOPregister<0b011, "set", 0, 1, "l">;
defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;

defm LDSMAX   : LDOPregister<0b100, "smax", 0, 0, "">;
defm LDSMAXA  : LDOPregister<0b100, "smax", 1, 0, "a">;
defm LDSMAXL  : LDOPregister<0b100, "smax", 0, 1, "l">;
defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;

defm LDSMIN   : LDOPregister<0b101, "smin", 0, 0, "">;
defm LDSMINA  : LDOPregister<0b101, "smin", 1, 0, "a">;
defm LDSMINL  : LDOPregister<0b101, "smin", 0, 1, "l">;
defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;

defm LDUMAX   : LDOPregister<0b110, "umax", 0, 0, "">;
defm LDUMAXA  : LDOPregister<0b110, "umax", 1, 0, "a">;
defm LDUMAXL  : LDOPregister<0b110, "umax", 0, 1, "l">;
defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;

defm LDUMIN   : LDOPregister<0b111, "umin", 0, 0, "">;
defm LDUMINA  : LDOPregister<0b111, "umin", 1, 0, "a">;
defm LDUMINL  : LDOPregister<0b111, "umin", 0, 1, "l">;
defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;

// v8.1 atomic ST<OP>(register), as aliases of "LD<OP>(register)" with Rt = XZR.
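// For example (illustrative): "stadd w0, [x2]" assembles as
// "ldadd w0, wzr, [x2]", discarding the loaded value.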
defm : STOPregister<"stadd","LDADD">;   // STADDx
defm : STOPregister<"stclr","LDCLR">;   // STCLRx
defm : STOPregister<"steor","LDEOR">;   // STEORx
defm : STOPregister<"stset","LDSET">;   // STSETx
defm : STOPregister<"stsmax","LDSMAX">; // STSMAXx
defm : STOPregister<"stsmin","LDSMIN">; // STSMINx
defm : STOPregister<"stumax","LDUMAX">; // STUMAXx
defm : STOPregister<"stumin","LDUMIN">; // STUMINx

//===----------------------------------------------------------------------===//
// Logical instructions.
//===----------------------------------------------------------------------===//

// (immediate)
defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
defm AND  : LogicalImm<0b00, "and", and, "bic">;
defm EOR  : LogicalImm<0b10, "eor", xor, "eon">;
defm ORR  : LogicalImm<0b01, "orr", or, "orn">;

// FIXME: these aliases *are* canonical sometimes (when movz can't be
// used). Actually, it seems to be working right now, but putting logical_immXX
// here is a bit dodgy on the AsmParser side too.
def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
                                          logical_imm32:$imm), 0>;
def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
                                          logical_imm64:$imm), 0>;

// (register)
defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
defm BICS : LogicalRegS<0b11, 1, "bics",
                        BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
defm AND  : LogicalReg<0b00, 0, "and", and>;
defm BIC  : LogicalReg<0b00, 1, "bic",
                       BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
defm EON  : LogicalReg<0b10, 1, "eon",
                       BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
defm EOR  : LogicalReg<0b10, 0, "eor", xor>;
defm ORN  : LogicalReg<0b01, 1, "orn",
                       BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
defm ORR  : LogicalReg<0b01, 0, "orr", or>;

def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;

def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;

def : InstAlias<"mvn $Wd, $Wm$sh",
                (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
def : InstAlias<"mvn $Xd, $Xm$sh",
                (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;

def : InstAlias<"tst $src1, $src2",
                (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
def : InstAlias<"tst $src1, $src2",
                (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;

def : InstAlias<"tst $src1, $src2",
                (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
def : InstAlias<"tst $src1, $src2",
                (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;

def : InstAlias<"tst $src1, $src2$sh",
                (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
def : InstAlias<"tst $src1, $src2$sh",
                (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;

def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;

//===----------------------------------------------------------------------===//
// One operand data processing instructions.
//===----------------------------------------------------------------------===//

defm CLS    : OneOperandData<0b101, "cls">;
defm CLZ    : OneOperandData<0b100, "clz", ctlz>;
defm RBIT   : OneOperandData<0b000, "rbit">;

def : Pat<(int_aarch64_rbit GPR32:$Rn), (RBITWr $Rn)>;
def : Pat<(int_aarch64_rbit GPR64:$Rn), (RBITXr $Rn)>;

def  REV16Wr : OneWRegData<0b001, "rev16",
                           UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
def  REV16Xr : OneXRegData<0b001, "rev16", null_frag>;

def : Pat<(cttz GPR32:$Rn),
          (CLZWr (RBITWr GPR32:$Rn))>;
def : Pat<(cttz GPR64:$Rn),
          (CLZXr (RBITXr GPR64:$Rn))>;
def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
                (i32 1))),
          (CLSWr GPR32:$Rn)>;
def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
                (i64 1))),
          (CLSXr GPR64:$Rn)>;

// Unlike the other one operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit, but actually use different
// opcode bits for the different sizes.
def REVWr   : OneWRegData<0b010, "rev", bswap>;
def REVXr   : OneXRegData<0b011, "rev", bswap>;
def REV32Xr : OneXRegData<0b010, "rev32",
                          UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;

def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;

// The bswap commutes with the rotr so we want a pattern for both possible
// orders.
def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;

//===----------------------------------------------------------------------===//
// Bitfield immediate extraction instruction.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in
defm EXTR : ExtractImm<"extr">;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
def : InstAlias<"ror $dst, $src, $shift",
            (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;

def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
          (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
          (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;

//===----------------------------------------------------------------------===//
// Other bitfield immediate instructions.
//===----------------------------------------------------------------------===//
let hasSideEffects = 0 in {
defm BFM  : BitfieldImmWith2RegArgs<0b01, "bfm">;
defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
}

def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 31 - shift_amt)
def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 31 - shift_amt)
def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 31 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(7, 63 - shift_amt)
def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 7 ? 7 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(15, 63 - shift_amt)
def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 15 ? 15 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

// min(31, 63 - shift_amt)
def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
  uint64_t enc = 63 - N->getZExtValue();
  enc = enc > 31 ? 31 : enc;
  return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;

def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                              (i64 (i32shift_b imm0_31:$imm)))>;
def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                              (i64 (i64shift_b imm0_63:$imm)))>;
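
// Worked example (illustrative): for (shl GPR32:$Rn, 4), i32shift_a yields
// immr = (32 - 4) & 31 = 28 and i32shift_b yields imms = 31 - 4 = 27, so the
// emitted "ubfm wd, wn, #28, #27" is exactly "lsl wd, wn, #4".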

let AddedComplexity = 10 in {
def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
}

def : InstAlias<"asr $dst, $src, $shift",
                (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"asr $dst, $src, $shift",
                (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
          (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
          (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;

def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
def : InstAlias<"lsr $dst, $src, $shift",
                (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;

//===----------------------------------------------------------------------===//
// Conditional comparison instructions.
//===----------------------------------------------------------------------===//
defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;

//===----------------------------------------------------------------------===//
// Conditional select instructions.
//===----------------------------------------------------------------------===//
defm CSEL  : CondSelect<0, 0b00, "csel">;

def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;

def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
          (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
          (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;

def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
          (CSINCWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
          (CSINCXr XZR, XZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
          (CSINVWr WZR, WZR, (i32 imm:$cc))>;
def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
          (CSINVXr XZR, XZR, (i32 imm:$cc))>;

// The aliased instruction uses the inverse of the condition code written in
// the alias. The parser already inverts the condition code for these aliases.
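// For example, "cset w0, eq" is encoded as "csinc w0, wzr, wzr, ne"; the
// parser has already substituted the inverted condition.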
def : InstAlias<"cset $dst, $cc",
                (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"cset $dst, $cc",
                (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"csetm $dst, $cc",
                (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
def : InstAlias<"csetm $dst, $cc",
                (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;

def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinc $dst, $src, $cc",
                (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cinv $dst, $src, $cc",
                (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
def : InstAlias<"cneg $dst, $src, $cc",
                (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;

//===----------------------------------------------------------------------===//
// PC-relative instructions.
//===----------------------------------------------------------------------===//
let isReMaterializable = 1 in {
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def ADR  : ADRI<0, "adr", adrlabel, []>;
} // hasSideEffects = 0

def ADRP : ADRI<1, "adrp", adrplabel,
                [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
} // isReMaterializable = 1

// page address of a constant pool entry, block address
def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;

//===----------------------------------------------------------------------===//
// Unconditional branch (register) instructions.
//===----------------------------------------------------------------------===//

let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
def RET  : BranchReg<0b0010, "ret", []>;
def DRPS : SpecialReturn<0b0101, "drps">;
def ERET : SpecialReturn<0b0100, "eret">;
} // isReturn = 1, isTerminator = 1, isBarrier = 1

// Default to the LR register.
def : InstAlias<"ret", (RET LR)>;

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BLR : BranchReg<0b0001, "blr", [(AArch64call GPR64:$Rn)]>;
} // isCall

let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def BR  : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
} // isBranch, isTerminator, isBarrier, isIndirectBranch

// Create a separate pseudo-instruction for codegen to use so that we don't
// flag lr as used in every function. It'll be restored before the RET by the
// epilogue if it's legitimately used.
def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
}

// This is a directive-like pseudo-instruction. The purpose is to insert an
// R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
// (which in the usual case is a BLR).
let hasSideEffects = 1 in
def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []> {
  let AsmString = ".tlsdesccall $sym";
}

// FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can "hasSideEffects" be dropped?
let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
    isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
    : Pseudo<(outs), (ins i64imm:$sym),
             [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>;
def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
          (TLSDESC_CALLSEQ texternalsym:$sym)>;

//===----------------------------------------------------------------------===//
// Conditional branch (immediate) instruction.
//===----------------------------------------------------------------------===//
def Bcc : BranchCond;

//===----------------------------------------------------------------------===//
// Compare-and-branch instructions.
//===----------------------------------------------------------------------===//
defm CBZ  : CmpBranch<0, "cbz", AArch64cbz>;
defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;

//===----------------------------------------------------------------------===//
// Test-bit-and-branch instructions.
//===----------------------------------------------------------------------===//
defm TBZ  : TestBranch<0, "tbz", AArch64tbz>;
defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;

//===----------------------------------------------------------------------===//
// Unconditional branch (immediate) instructions.
//===----------------------------------------------------------------------===//
let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
def B  : BranchImm<0, "b", [(br bb:$addr)]>;
} // isBranch, isTerminator, isBarrier

let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;

//===----------------------------------------------------------------------===//
// Exception generation instructions.
//===----------------------------------------------------------------------===//
def BRK   : ExceptionGeneration<0b001, 0b00, "brk">;
def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
def HLT   : ExceptionGeneration<0b010, 0b00, "hlt">;
def HVC   : ExceptionGeneration<0b000, 0b10, "hvc">;
def SMC   : ExceptionGeneration<0b000, 0b11, "smc">;
def SVC   : ExceptionGeneration<0b000, 0b01, "svc">;

// DCPSn defaults to an immediate operand of zero if unspecified.
def : InstAlias<"dcps1", (DCPS1 0)>;
def : InstAlias<"dcps2", (DCPS2 0)>;
def : InstAlias<"dcps3", (DCPS3 0)>;

//===----------------------------------------------------------------------===//
// Load instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
defm LDPW  : LoadPairOffset<0b00, 0, GPR32, simm7s4, "ldp">;
defm LDPX  : LoadPairOffset<0b10, 0, GPR64, simm7s8, "ldp">;
defm LDPS  : LoadPairOffset<0b00, 1, FPR32, simm7s4, "ldp">;
defm LDPD  : LoadPairOffset<0b01, 1, FPR64, simm7s8, "ldp">;
defm LDPQ  : LoadPairOffset<0b10, 1, FPR128, simm7s16, "ldp">;

defm LDPSW : LoadPairOffset<0b01, 0, GPR64, simm7s4, "ldpsw">;

// Pair (pre-indexed)
def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32, simm7s4, "ldp">;
def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64, simm7s8, "ldp">;
def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32, simm7s4, "ldp">;
def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64, simm7s8, "ldp">;
def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128, simm7s16, "ldp">;

def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;

// Pair (post-indexed)
def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32, simm7s4, "ldp">;
def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64, simm7s8, "ldp">;
def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32, simm7s4, "ldp">;
def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64, simm7s8, "ldp">;
def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128, simm7s16, "ldp">;

def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;

// Pair (no allocate)
defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32, simm7s4, "ldnp">;
defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64, simm7s8, "ldnp">;
defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32, simm7s4, "ldnp">;
defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64, simm7s8, "ldnp">;
defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128, simm7s16, "ldnp">;

// (register offset)

// Integer
defm LDRBB : Load8RO<0b00,  0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
defm LDRW  : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
defm LDRX  : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;

// Floating-point
defm LDRB : Load8RO<0b00,   1, 0b01, FPR8,   "ldr", untyped, load>;
defm LDRH : Load16RO<0b01,  1, 0b01, FPR16,  "ldr", f16, load>;
defm LDRS : Load32RO<0b10,  1, 0b01, FPR32,  "ldr", f32, load>;
defm LDRD : Load64RO<0b11,  1, 0b01, FPR64,  "ldr", f64, load>;
defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128, "ldr", f128, load>;

// Load sign-extended half-word
defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;

// Load sign-extended byte
defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;

// Load sign-extended word
defm LDRSW  : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;

// Prefetch
defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;

// For regular loads, we do not have any alignment requirement.
// Thus, it is safe to directly map the vector loads with interesting
// addressing modes.
// FIXME: We could do the same for bitconvert to floating point vectors.
multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
                              ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                           sub)>;

  def : Pat<(VecTy (scalar_to_vector (ScalTy
              (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
            (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                           (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                           sub)>;
}
1317 let AddedComplexity = 10 in {
1318 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v8i8, LDRBroW, LDRBroX, bsub>;
1319 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
1321 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
1322 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
1324 defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
1325 defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
1327 defm : ScalToVecROLoadPat<ro32, load, i32, v2i32, LDRSroW, LDRSroX, ssub>;
1328 defm : ScalToVecROLoadPat<ro32, load, i32, v4i32, LDRSroW, LDRSroX, ssub>;
1330 defm : ScalToVecROLoadPat<ro32, load, f32, v2f32, LDRSroW, LDRSroX, ssub>;
1331 defm : ScalToVecROLoadPat<ro32, load, f32, v4f32, LDRSroW, LDRSroX, ssub>;
1333 defm : ScalToVecROLoadPat<ro64, load, i64, v2i64, LDRDroW, LDRDroX, dsub>;
1335 defm : ScalToVecROLoadPat<ro64, load, f64, v2f64, LDRDroW, LDRDroX, dsub>;
1338 def : Pat <(v1i64 (scalar_to_vector (i64
1339 (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
1340 ro_Wextend64:$extend))))),
1341 (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
1343 def : Pat <(v1i64 (scalar_to_vector (i64
1344 (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
1345 ro_Xextend64:$extend))))),
1346 (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
1347 }
1349 // Match all 64-bit-wide loads whose type is compatible with FPR64
1350 multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
1351 Instruction LOADW, Instruction LOADX> {
1353 def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1354 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1356 def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1357 (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
1358 }
1360 let AddedComplexity = 10 in {
1361 let Predicates = [IsLE] in {
1362 // We must use LD1 to perform vector loads in big-endian.
1363 defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
1364 defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
1365 defm : VecROLoadPat<ro64, v8i8, LDRDroW, LDRDroX>;
1366 defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
1367 defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
1368 }
1370 defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
1371 defm : VecROLoadPat<ro64, v1f64, LDRDroW, LDRDroX>;
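// Rationale for the [IsLE] guard: in big-endian mode, LDR/STR of a vector
// register and LD1/ST1 disagree on element ordering, so plain LDRD/LDRQ
// cannot be used for multi-element vectors there. v1i64/v1f64 have a single
// element, so they are matched unconditionally.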
1373 // Match all 128-bit-wide loads whose type is compatible with FPR128
1374 let Predicates = [IsLE] in {
1375 // We must use LD1 to perform vector loads in big-endian.
1376 defm : VecROLoadPat<ro128, v2i64, LDRQroW, LDRQroX>;
1377 defm : VecROLoadPat<ro128, v2f64, LDRQroW, LDRQroX>;
1378 defm : VecROLoadPat<ro128, v4i32, LDRQroW, LDRQroX>;
1379 defm : VecROLoadPat<ro128, v4f32, LDRQroW, LDRQroX>;
1380 defm : VecROLoadPat<ro128, v8i16, LDRQroW, LDRQroX>;
1381 defm : VecROLoadPat<ro128, v8f16, LDRQroW, LDRQroX>;
1382 defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
1383 }
1384 } // AddedComplexity = 10
1387 multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
1388 Instruction INSTW, Instruction INSTX> {
1389 def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1390 (SUBREG_TO_REG (i64 0),
1391 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
1392 sub_32)>;
1394 def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1395 (SUBREG_TO_REG (i64 0),
1396 (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
1397 sub_32)>;
1398 }
1400 let AddedComplexity = 10 in {
1401 defm : ExtLoadTo64ROPat<ro8, zextloadi8, LDRBBroW, LDRBBroX>;
1402 defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
1403 defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW, LDRWroX>;
1405 // zextloadi1 -> zextloadi8
1406 defm : ExtLoadTo64ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
1408 // extload -> zextload
1409 defm : ExtLoadTo64ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
1410 defm : ExtLoadTo64ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
1411 defm : ExtLoadTo64ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
1413 // extloadi1 -> zextloadi8
1414 defm : ExtLoadTo64ROPat<ro8, extloadi1, LDRBBroW, LDRBBroX>;
1415 }
1419 multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
1420 Instruction INSTW, Instruction INSTX> {
1421 def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1422 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1424 def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1425 (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
1426 }
1429 let AddedComplexity = 10 in {
1430 // extload -> zextload
1431 defm : ExtLoadTo32ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
1432 defm : ExtLoadTo32ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
1433 defm : ExtLoadTo32ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
1435 // zextloadi1 -> zextloadi8
1436 defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
1437 }
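// Rationale for the extload cases above: an anyext load only guarantees the
// low bits of its result, so selecting the zero-extending LDRB/LDRH/LDR
// forms is always safe and costs nothing extra.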
1440 // (unsigned immediate)
1442 defm LDRX : LoadUI<0b11, 0, 0b01, GPR64, uimm12s8, "ldr",
1443 [(set GPR64:$Rt,
1444 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
1445 defm LDRW : LoadUI<0b10, 0, 0b01, GPR32, uimm12s4, "ldr",
1446 [(set GPR32:$Rt,
1447 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
1448 defm LDRB : LoadUI<0b00, 1, 0b01, FPR8, uimm12s1, "ldr",
1449 [(set FPR8:$Rt,
1450 (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
1451 defm LDRH : LoadUI<0b01, 1, 0b01, FPR16, uimm12s2, "ldr",
1452 [(set (f16 FPR16:$Rt),
1453 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
1454 defm LDRS : LoadUI<0b10, 1, 0b01, FPR32, uimm12s4, "ldr",
1455 [(set (f32 FPR32:$Rt),
1456 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
1457 defm LDRD : LoadUI<0b11, 1, 0b01, FPR64, uimm12s8, "ldr",
1458 [(set (f64 FPR64:$Rt),
1459 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
1460 defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128, uimm12s16, "ldr",
1461 [(set (f128 FPR128:$Rt),
1462 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
1464 // For regular load, we do not have any alignment requirement.
1465 // Thus, it is safe to directly map the vector loads with interesting
1466 // addressing modes.
1467 // FIXME: We could do the same for bitconvert to floating point vectors.
1468 def : Pat <(v8i8 (scalar_to_vector (i32
1469 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
1470 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
1471 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
1472 def : Pat <(v16i8 (scalar_to_vector (i32
1473 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
1474 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
1475 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
1476 def : Pat <(v4i16 (scalar_to_vector (i32
1477 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
1478 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
1479 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
1480 def : Pat <(v8i16 (scalar_to_vector (i32
1481 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
1482 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
1483 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
1484 def : Pat <(v2i32 (scalar_to_vector (i32
1485 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
1486 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
1487 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
1488 def : Pat <(v4i32 (scalar_to_vector (i32
1489 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
1490 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
1491 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
1492 def : Pat <(v1i64 (scalar_to_vector (i64
1493 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
1494 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1495 def : Pat <(v2i64 (scalar_to_vector (i64
1496 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
1497 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
1498 (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
1500 // Match all 64-bit-wide loads whose type is compatible with FPR64
1501 let Predicates = [IsLE] in {
1502 // We must use LD1 to perform vector loads in big-endian.
1503 def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1504 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1505 def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1506 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1507 def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1508 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1509 def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1510 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1511 def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1512 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1513 }
1514 def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1515 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1516 def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1517 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1519 // Match all 128-bit-wide loads whose type is compatible with FPR128
1520 let Predicates = [IsLE] in {
1521 // We must use LD1 to perform vector loads in big-endian.
1522 def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1523 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1524 def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1525 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1526 def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1527 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1528 def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1529 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1530 def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1531 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1532 def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1533 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1534 def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1535 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1536 }
1537 def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1538 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1540 defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
1541 [(set GPR32:$Rt,
1542 (zextloadi16 (am_indexed16 GPR64sp:$Rn,
1543 uimm12s2:$offset)))]>;
1544 defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
1545 [(set GPR32:$Rt,
1546 (zextloadi8 (am_indexed8 GPR64sp:$Rn,
1547 uimm12s1:$offset)))]>;
1549 def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1550 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1551 def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1552 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
1554 // zextloadi1 -> zextloadi8
1555 def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1556 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1557 def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1558 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1560 // extload -> zextload
1561 def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1562 (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
1563 def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1564 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1565 def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1566 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1567 def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
1568 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
1569 def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1570 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
1571 def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1572 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1573 def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1574 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1576 // load sign-extended half-word
1577 defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
1578 [(set GPR32:$Rt,
1579 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
1580 uimm12s2:$offset)))]>;
1581 defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
1582 [(set GPR64:$Rt,
1583 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
1584 uimm12s2:$offset)))]>;
1586 // load sign-extended byte
1587 defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
1588 [(set GPR32:$Rt,
1589 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
1590 uimm12s1:$offset)))]>;
1591 defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
1592 [(set GPR64:$Rt,
1593 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
1594 uimm12s1:$offset)))]>;
1596 // load sign-extended word
1597 defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
1598 [(set GPR64:$Rt,
1599 (sextloadi32 (am_indexed32 GPR64sp:$Rn,
1600 uimm12s4:$offset)))]>;
1602 // load zero-extended word
1603 def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
1604 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
1606 // Pre-fetch.
1607 def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
1608 [(AArch64Prefetch imm:$Rt,
1609 (am_indexed64 GPR64sp:$Rn,
1610 uimm12s8:$offset))]>;
1612 def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
1614 // (literal)
1616 def LDRWl : LoadLiteral<0b00, 0, GPR32, "ldr">;
1617 def LDRXl : LoadLiteral<0b01, 0, GPR64, "ldr">;
1618 def LDRSl : LoadLiteral<0b00, 1, FPR32, "ldr">;
1619 def LDRDl : LoadLiteral<0b01, 1, FPR64, "ldr">;
1620 def LDRQl : LoadLiteral<0b10, 1, FPR128, "ldr">;
1622 // load sign-extended word
1623 def LDRSWl : LoadLiteral<0b10, 0, GPR64, "ldrsw">;
1625 // Pre-fetch.
1626 def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
1627 // [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
1630 // (unscaled immediate)
1631 defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64, "ldur",
1632 [(set GPR64:$Rt,
1633 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
1634 defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32, "ldur",
1635 [(set GPR32:$Rt,
1636 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1637 defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8, "ldur",
1638 [(set FPR8:$Rt,
1639 (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1640 defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16, "ldur",
1641 [(set (f16 FPR16:$Rt),
1642 (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1643 defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32, "ldur",
1644 [(set (f32 FPR32:$Rt),
1645 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1646 defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64, "ldur",
1647 [(set (f64 FPR64:$Rt),
1648 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
1649 defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128, "ldur",
1650 [(set (f128 FPR128:$Rt),
1651 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
1653 defm LDURHH
1654 : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
1655 [(set GPR32:$Rt,
1656 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1657 defm LDURBB
1658 : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
1659 [(set GPR32:$Rt,
1660 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1662 // Match all 64-bit-wide loads whose type is compatible with FPR64
1663 let Predicates = [IsLE] in {
1664 def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1665 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1666 def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1667 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1668 def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1669 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1670 def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1671 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1672 def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1673 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1674 }
1675 def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1676 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1677 def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1678 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1680 // Match all 128-bit-wide loads whose type is compatible with FPR128
1681 let Predicates = [IsLE] in {
1682 def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1683 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1684 def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1685 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1686 def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1687 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1688 def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1689 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1690 def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1691 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1692 def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1693 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1694 def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1695 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1696 }
1699 def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1700 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
1701 def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1702 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1703 def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1704 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1705 def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
1706 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1707 def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1708 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1709 def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1710 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1711 def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1712 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1714 def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1715 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
1716 def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1717 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1718 def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1719 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1720 def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
1721 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1722 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1723 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1724 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1725 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1726 def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1727 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1731 // LDR mnemonics fall back to LDUR for negative or unaligned offsets.
1733 // Define new assembler match classes as we want to only match these when
1734 // they don't otherwise match the scaled addressing mode for LDR/STR. Don't
1735 // associate a DiagnosticType either, as we want the diagnostic for the
1736 // canonical form (the scaled operand) to take precedence.
1737 class SImm9OffsetOperand<int Width> : AsmOperandClass {
1738 let Name = "SImm9OffsetFB" # Width;
1739 let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
1740 let RenderMethod = "addImmOperands";
1741 }
1743 def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
1744 def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
1745 def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
1746 def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
1747 def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
1749 def simm9_offset_fb8 : Operand<i64> {
1750 let ParserMatchClass = SImm9OffsetFB8Operand;
1751 }
1752 def simm9_offset_fb16 : Operand<i64> {
1753 let ParserMatchClass = SImm9OffsetFB16Operand;
1754 }
1755 def simm9_offset_fb32 : Operand<i64> {
1756 let ParserMatchClass = SImm9OffsetFB32Operand;
1757 }
1758 def simm9_offset_fb64 : Operand<i64> {
1759 let ParserMatchClass = SImm9OffsetFB64Operand;
1760 }
1761 def simm9_offset_fb128 : Operand<i64> {
1762 let ParserMatchClass = SImm9OffsetFB128Operand;
1763 }
1765 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1766 (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
1767 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1768 (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1769 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1770 (LDURBi FPR8:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1771 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1772 (LDURHi FPR16:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1773 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1774 (LDURSi FPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1775 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1776 (LDURDi FPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
1777 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1778 (LDURQi FPR128:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
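// Illustration of the fall-back (forms the aliases above accept):
//   ldr x0, [x1, #8]     scaled and in range   -> LDRXui
//   ldr x0, [x1, #-8]    negative offset       -> LDURXi
//   ldr x0, [x1, #1]     unaligned for 8 bytes -> LDURXi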
1781 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1782 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1783 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1784 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1786 // load sign-extended half-word
1787 defm LDURSHW
1788 : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
1789 [(set GPR32:$Rt,
1790 (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1791 defm LDURSHX
1792 : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
1793 [(set GPR64:$Rt,
1794 (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1796 // load sign-extended byte
1797 defm LDURSBW
1798 : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
1799 [(set GPR32:$Rt,
1800 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1801 defm LDURSBX
1802 : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
1803 [(set GPR64:$Rt,
1804 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1806 // load sign-extended word
1807 defm LDURSW
1808 : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
1809 [(set GPR64:$Rt,
1810 (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1812 // zero and sign extending aliases from generic LDR* mnemonics to LDUR*.
1813 def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
1814 (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1815 def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
1816 (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1817 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
1818 (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1819 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
1820 (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1821 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
1822 (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1823 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
1824 (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1825 def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
1826 (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1828 // Pre-fetch.
1829 defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
1830 [(AArch64Prefetch imm:$Rt,
1831 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
1834 // (unscaled immediate, unprivileged)
1835 defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
1836 defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
1838 defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
1839 defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
1841 // load sign-extended half-word
1842 defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
1843 defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
1845 // load sign-extended byte
1846 defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
1847 defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
1849 // load sign-extended word
1850 defm LDTRSW : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
1853 // (immediate pre-indexed)
1854 def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32, "ldr">;
1855 def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64, "ldr">;
1856 def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8, "ldr">;
1857 def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16, "ldr">;
1858 def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32, "ldr">;
1859 def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64, "ldr">;
1860 def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128, "ldr">;
1862 // load sign-extended half-word
1863 def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
1864 def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
1866 // load sign-extended byte
1867 def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
1868 def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
1870 // load zero-extended byte and half-word
1871 def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32, "ldrb">;
1872 def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32, "ldrh">;
1874 // load sign-extended word
1875 def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
1878 // (immediate post-indexed)
1879 def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32, "ldr">;
1880 def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64, "ldr">;
1881 def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8, "ldr">;
1882 def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16, "ldr">;
1883 def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32, "ldr">;
1884 def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64, "ldr">;
1885 def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128, "ldr">;
1887 // load sign-extended half-word
1888 def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
1889 def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
1891 // load sign-extended byte
1892 def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
1893 def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
1895 // load zero-extended byte and half-word
1896 def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32, "ldrb">;
1897 def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32, "ldrh">;
1899 // load sign-extended word
1900 def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
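// Reminder of the two indexed forms (illustrative):
//   ldr x0, [x1, #8]!   pre-indexed:  load from x1+8, then x1 += 8
//   ldr x0, [x1], #8    post-indexed: load from x1,   then x1 += 8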
1902 //===----------------------------------------------------------------------===//
1903 // Store instructions.
1904 //===----------------------------------------------------------------------===//
1906 // Pair (indexed, offset)
1907 // FIXME: Use dedicated range-checked addressing mode operand here.
1908 defm STPW : StorePairOffset<0b00, 0, GPR32, simm7s4, "stp">;
1909 defm STPX : StorePairOffset<0b10, 0, GPR64, simm7s8, "stp">;
1910 defm STPS : StorePairOffset<0b00, 1, FPR32, simm7s4, "stp">;
1911 defm STPD : StorePairOffset<0b01, 1, FPR64, simm7s8, "stp">;
1912 defm STPQ : StorePairOffset<0b10, 1, FPR128, simm7s16, "stp">;
1914 // Pair (pre-indexed)
1915 def STPWpre : StorePairPreIdx<0b00, 0, GPR32, simm7s4, "stp">;
1916 def STPXpre : StorePairPreIdx<0b10, 0, GPR64, simm7s8, "stp">;
1917 def STPSpre : StorePairPreIdx<0b00, 1, FPR32, simm7s4, "stp">;
1918 def STPDpre : StorePairPreIdx<0b01, 1, FPR64, simm7s8, "stp">;
1919 def STPQpre : StorePairPreIdx<0b10, 1, FPR128, simm7s16, "stp">;
1921 // Pair (post-indexed)
1922 def STPWpost : StorePairPostIdx<0b00, 0, GPR32, simm7s4, "stp">;
1923 def STPXpost : StorePairPostIdx<0b10, 0, GPR64, simm7s8, "stp">;
1924 def STPSpost : StorePairPostIdx<0b00, 1, FPR32, simm7s4, "stp">;
1925 def STPDpost : StorePairPostIdx<0b01, 1, FPR64, simm7s8, "stp">;
1926 def STPQpost : StorePairPostIdx<0b10, 1, FPR128, simm7s16, "stp">;
1928 // Pair (no allocate)
1929 defm STNPW : StorePairNoAlloc<0b00, 0, GPR32, simm7s4, "stnp">;
1930 defm STNPX : StorePairNoAlloc<0b10, 0, GPR64, simm7s8, "stnp">;
1931 defm STNPS : StorePairNoAlloc<0b00, 1, FPR32, simm7s4, "stnp">;
1932 defm STNPD : StorePairNoAlloc<0b01, 1, FPR64, simm7s8, "stnp">;
1933 defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128, simm7s16, "stnp">;
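// "No allocate" pairs (e.g. "stnp x0, x1, [x2]") carry a non-temporal hint:
// the data is unlikely to be reused soon, so the CPU may avoid allocating
// cache lines for it.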
1936 // (register offset)
1938 // Integer
1939 defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
1940 defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
1941 defm STRW : Store32RO<0b10, 0, 0b00, GPR32, "str", i32, store>;
1942 defm STRX : Store64RO<0b11, 0, 0b00, GPR64, "str", i64, store>;
1945 // Floating-point
1946 defm STRB : Store8RO< 0b00, 1, 0b00, FPR8, "str", untyped, store>;
1947 defm STRH : Store16RO<0b01, 1, 0b00, FPR16, "str", f16, store>;
1948 defm STRS : Store32RO<0b10, 1, 0b00, FPR32, "str", f32, store>;
1949 defm STRD : Store64RO<0b11, 1, 0b00, FPR64, "str", f64, store>;
1950 defm STRQ : Store128RO<0b00, 1, 0b10, FPR128, "str", f128, store>;
1952 multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
1953 Instruction STRW, Instruction STRX> {
1955 def : Pat<(storeop GPR64:$Rt,
1956 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
1957 (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
1958 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1960 def : Pat<(storeop GPR64:$Rt,
1961 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
1962 (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
1963 GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
1964 }
1966 let AddedComplexity = 10 in {
1968 defm : TruncStoreFrom64ROPat<ro8, truncstorei8, STRBBroW, STRBBroX>;
1969 defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
1970 defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW, STRWroX>;
1971 }
1973 multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
1974 Instruction STRW, Instruction STRX> {
1975 def : Pat<(store (VecTy FPR:$Rt),
1976 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
1977 (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1979 def : Pat<(store (VecTy FPR:$Rt),
1980 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
1981 (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
1982 }
1984 let AddedComplexity = 10 in {
1985 // Match all 64-bit-wide stores whose type is compatible with FPR64
1986 let Predicates = [IsLE] in {
1987 // We must use ST1 to store vectors in big-endian.
1988 defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
1989 defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
1990 defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
1991 defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
1992 defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
1993 }
1995 defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
1996 defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;
1998 // Match all 128-bit-wide stores whose type is compatible with FPR128
1999 let Predicates = [IsLE] in {
2000 // We must use ST1 to store vectors in big-endian.
2001 defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
2002 defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
2003 defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
2004 defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
2005 defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
2006 defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
2007 defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
2008 }
2009 } // AddedComplexity = 10
2011 // Match stores from lane 0 to the appropriate subreg's store.
2012 multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
2013 ValueType VecTy, ValueType STy,
2014 SubRegIndex SubRegIdx,
2015 Instruction STRW, Instruction STRX> {
2017 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2018 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2019 (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2020 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2022 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2023 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2024 (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2025 GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2026 }
2028 let AddedComplexity = 19 in {
2029 defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
2030 defm : VecROStoreLane0Pat<ro16, store , v8i16, i16, hsub, STRHroW, STRHroX>;
2031 defm : VecROStoreLane0Pat<ro32, truncstorei32, v4i32, i32, ssub, STRSroW, STRSroX>;
2032 defm : VecROStoreLane0Pat<ro32, store , v4i32, i32, ssub, STRSroW, STRSroX>;
2033 defm : VecROStoreLane0Pat<ro32, store , v4f32, f32, ssub, STRSroW, STRSroX>;
2034 defm : VecROStoreLane0Pat<ro64, store , v2i64, i64, dsub, STRDroW, STRDroX>;
2035 defm : VecROStoreLane0Pat<ro64, store , v2f64, f64, dsub, STRDroW, STRDroX>;
2036 }
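// e.g. storing lane 0 of a v4f32 becomes a plain "str s0, [x0, x1]" of the
// ssub subregister, with no explicit lane extract.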
2039 // (unsigned immediate)
2040 defm STRX : StoreUI<0b11, 0, 0b00, GPR64, uimm12s8, "str",
2041 [(store GPR64:$Rt,
2042 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2043 defm STRW : StoreUI<0b10, 0, 0b00, GPR32, uimm12s4, "str",
2044 [(store GPR32:$Rt,
2045 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2046 defm STRB : StoreUI<0b00, 1, 0b00, FPR8, uimm12s1, "str",
2047 [(store FPR8:$Rt,
2048 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
2049 defm STRH : StoreUI<0b01, 1, 0b00, FPR16, uimm12s2, "str",
2050 [(store (f16 FPR16:$Rt),
2051 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
2052 defm STRS : StoreUI<0b10, 1, 0b00, FPR32, uimm12s4, "str",
2053 [(store (f32 FPR32:$Rt),
2054 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2055 defm STRD : StoreUI<0b11, 1, 0b00, FPR64, uimm12s8, "str",
2056 [(store (f64 FPR64:$Rt),
2057 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2058 defm STRQ : StoreUI<0b00, 1, 0b10, FPR128, uimm12s16, "str", []>;
2060 defm STRHH : StoreUI<0b01, 0, 0b00, GPR32, uimm12s2, "strh",
2061 [(truncstorei16 GPR32:$Rt,
2062 (am_indexed16 GPR64sp:$Rn,
2063 uimm12s2:$offset))]>;
2064 defm STRBB : StoreUI<0b00, 0, 0b00, GPR32, uimm12s1, "strb",
2065 [(truncstorei8 GPR32:$Rt,
2066 (am_indexed8 GPR64sp:$Rn,
2067 uimm12s1:$offset))]>;
2069 // Match all 64-bit-wide stores whose type is compatible with FPR64
2070 let AddedComplexity = 10 in {
2071 let Predicates = [IsLE] in {
2072 // We must use ST1 to store vectors in big-endian.
2073 def : Pat<(store (v2f32 FPR64:$Rt),
2074 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2075 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2076 def : Pat<(store (v8i8 FPR64:$Rt),
2077 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2078 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2079 def : Pat<(store (v4i16 FPR64:$Rt),
2080 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2081 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2082 def : Pat<(store (v2i32 FPR64:$Rt),
2083 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2084 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2085 def : Pat<(store (v4f16 FPR64:$Rt),
2086 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2087 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2088 }
2089 def : Pat<(store (v1f64 FPR64:$Rt),
2090 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2091 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2092 def : Pat<(store (v1i64 FPR64:$Rt),
2093 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2094 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2096 // Match all 128-bit-wide stores whose type is compatible with FPR128
2097 let Predicates = [IsLE] in {
2098 // We must use ST1 to store vectors in big-endian.
2099 def : Pat<(store (v4f32 FPR128:$Rt),
2100 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2101 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2102 def : Pat<(store (v2f64 FPR128:$Rt),
2103 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2104 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2105 def : Pat<(store (v16i8 FPR128:$Rt),
2106 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2107 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2108 def : Pat<(store (v8i16 FPR128:$Rt),
2109 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2110 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2111 def : Pat<(store (v4i32 FPR128:$Rt),
2112 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2113 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2114 def : Pat<(store (v2i64 FPR128:$Rt),
2115 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2116 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2117 def : Pat<(store (v8f16 FPR128:$Rt),
2118 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2119 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2120 }
2121 def : Pat<(store (f128 FPR128:$Rt),
2122 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2123 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2126 def : Pat<(truncstorei32 GPR64:$Rt,
2127 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
2128 (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
2129 def : Pat<(truncstorei16 GPR64:$Rt,
2130 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
2131 (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
2132 def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
2133 (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
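// e.g. a C store like "*(int32_t *)p = (int32_t)x" with x in a 64-bit
// register becomes a single "str w0, [x0]" of the value's sub_32 half; no
// separate truncation instruction is emitted.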
2135 } // AddedComplexity = 10
2138 // (unscaled immediate)
2139 defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64, "stur",
2140 [(store GPR64:$Rt,
2141 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2142 defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32, "stur",
2143 [(store GPR32:$Rt,
2144 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2145 defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8, "stur",
2146 [(store FPR8:$Rt,
2147 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2148 defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16, "stur",
2149 [(store (f16 FPR16:$Rt),
2150 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2151 defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32, "stur",
2152 [(store (f32 FPR32:$Rt),
2153 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2154 defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64, "stur",
2155 [(store (f64 FPR64:$Rt),
2156 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2157 defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128, "stur",
2158 [(store (f128 FPR128:$Rt),
2159 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
2160 defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32, "sturh",
2161 [(truncstorei16 GPR32:$Rt,
2162 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2163 defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32, "sturb",
2164 [(truncstorei8 GPR32:$Rt,
2165 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2167 // Match all 64-bit-wide stores whose type is compatible with FPR64
2168 let Predicates = [IsLE] in {
2169 // We must use ST1 to store vectors in big-endian.
2170 def : Pat<(store (v2f32 FPR64:$Rt),
2171 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2172 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2173 def : Pat<(store (v8i8 FPR64:$Rt),
2174 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2175 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2176 def : Pat<(store (v4i16 FPR64:$Rt),
2177 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2178 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2179 def : Pat<(store (v2i32 FPR64:$Rt),
2180 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2181 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2182 def : Pat<(store (v4f16 FPR64:$Rt),
2183 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2184 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2185 }
2186 def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2187 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2188 def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2189 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2191 // Match all 128-bit-wide stores whose type is compatible with FPR128
2192 let Predicates = [IsLE] in {
2193 // We must use ST1 to store vectors in big-endian.
2194 def : Pat<(store (v4f32 FPR128:$Rt),
2195 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2196 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2197 def : Pat<(store (v2f64 FPR128:$Rt),
2198 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2199 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2200 def : Pat<(store (v16i8 FPR128:$Rt),
2201 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2202 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2203 def : Pat<(store (v8i16 FPR128:$Rt),
2204 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2205 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2206 def : Pat<(store (v4i32 FPR128:$Rt),
2207 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2208 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2209 def : Pat<(store (v2i64 FPR128:$Rt),
2210 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2211 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2215 def : Pat<(store (v8f16 FPR128:$Rt),
2216 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2217 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2218 }
2220 // unscaled i64 truncating stores
2221 def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
2222 (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2223 def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
2224 (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2225 def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
2226 (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2229 // STR mnemonics fall back to STUR for negative or unaligned offsets.
2230 def : InstAlias<"str $Rt, [$Rn, $offset]",
2231 (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2232 def : InstAlias<"str $Rt, [$Rn, $offset]",
2233 (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2234 def : InstAlias<"str $Rt, [$Rn, $offset]",
2235 (STURBi FPR8:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2236 def : InstAlias<"str $Rt, [$Rn, $offset]",
2237 (STURHi FPR16:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2238 def : InstAlias<"str $Rt, [$Rn, $offset]",
2239 (STURSi FPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2240 def : InstAlias<"str $Rt, [$Rn, $offset]",
2241 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2242 def : InstAlias<"str $Rt, [$Rn, $offset]",
2243 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
2245 def : InstAlias<"strb $Rt, [$Rn, $offset]",
2246 (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2247 def : InstAlias<"strh $Rt, [$Rn, $offset]",
2248 (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2251 // (unscaled immediate, unprivileged)
2252 defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
2253 defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
2255 defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
2256 defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
2259 // (immediate pre-indexed)
2260 def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32, "str", pre_store, i32>;
2261 def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64, "str", pre_store, i64>;
2262 def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8, "str", pre_store, untyped>;
2263 def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16, "str", pre_store, f16>;
2264 def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32, "str", pre_store, f32>;
2265 def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64, "str", pre_store, f64>;
2266 def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128, "str", pre_store, f128>;
2268 def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32, "strb", pre_truncsti8, i32>;
2269 def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32, "strh", pre_truncsti16, i32>;
2272 def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2273 (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2274 simm9:$off)>;
2275 def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2276 (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2277 simm9:$off)>;
2278 def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2279 (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2280 simm9:$off)>;
2282 def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2283 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2284 def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2285 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2286 def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2287 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2288 def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2289 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2290 def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2291 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2292 def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2293 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2294 def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2295 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2297 def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2298 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2299 def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2300 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2301 def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2302 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2303 def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2304 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2305 def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2306 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2307 def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2308 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2309 def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2310 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2313 // (immediate post-indexed)
2314 def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32, "str", post_store, i32>;
2315 def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64, "str", post_store, i64>;
2316 def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8, "str", post_store, untyped>;
2317 def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16, "str", post_store, f16>;
2318 def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32, "str", post_store, f32>;
2319 def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64, "str", post_store, f64>;
2320 def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128, "str", post_store, f128>;
2322 def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32, "strb", post_truncsti8, i32>;
2323 def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32, "strh", post_truncsti16, i32>;
2326 def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2327 (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2328 simm9:$off)>;
2329 def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2330 (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2331 simm9:$off)>;
2332 def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2333 (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2334 simm9:$off)>;
2336 def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2337 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2338 def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2339 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2340 def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2341 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2342 def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2343 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2344 def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2345 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2346 def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2347 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2348 def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2349 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2351 def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2352 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2353 def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2354 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2355 def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2356 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2357 def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2358 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2359 def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2360 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2361 def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2362 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2363 def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2364 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2366 //===----------------------------------------------------------------------===//
2367 // Load/store exclusive instructions.
2368 //===----------------------------------------------------------------------===//
2370 def LDARW : LoadAcquire <0b10, 1, 1, 0, 1, GPR32, "ldar">;
2371 def LDARX : LoadAcquire <0b11, 1, 1, 0, 1, GPR64, "ldar">;
2372 def LDARB : LoadAcquire <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
2373 def LDARH : LoadAcquire <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
2375 def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
2376 def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
2377 def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
2378 def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
2380 def LDXRW : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
2381 def LDXRX : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
2382 def LDXRB : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
2383 def LDXRH : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
2385 def STLRW : StoreRelease <0b10, 1, 0, 0, 1, GPR32, "stlr">;
2386 def STLRX : StoreRelease <0b11, 1, 0, 0, 1, GPR64, "stlr">;
2387 def STLRB : StoreRelease <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
2388 def STLRH : StoreRelease <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
2390 def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
2391 def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
2392 def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
2393 def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
2395 def STXRW : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
2396 def STXRX : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
2397 def STXRB : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
2398 def STXRH : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
2400 def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
2401 def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
2403 def LDXPW : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
2404 def LDXPX : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
2406 def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
2407 def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
2409 def STXPW : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
2410 def STXPX : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
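// Typical use is a load-exclusive/store-exclusive retry loop; an atomic
// increment is roughly:
//   0: ldaxr x8, [x0]
//      add   x8, x8, #1
//      stlxr w9, x8, [x0]    w9 == 0 on success
//      cbnz  w9, 0b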
2412 let Predicates = [HasV8_1a] in {
2413 // v8.1a "Limited Order Region" extension load-acquire instructions
2414 def LDLARW : LoadAcquire <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
2415 def LDLARX : LoadAcquire <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
2416 def LDLARB : LoadAcquire <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
2417 def LDLARH : LoadAcquire <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
2419 // v8.1a "Limited Order Region" extension store-release instructions
2420 def STLLRW : StoreRelease <0b10, 1, 0, 0, 0, GPR32, "stllr">;
2421 def STLLRX : StoreRelease <0b11, 1, 0, 0, 0, GPR64, "stllr">;
2422 def STLLRB : StoreRelease <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
2423 def STLLRH : StoreRelease <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
2424 }
2426 //===----------------------------------------------------------------------===//
2427 // Scaled floating point to integer conversion instructions.
2428 //===----------------------------------------------------------------------===//
2430 defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
2431 defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
2432 defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
2433 defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
2434 defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
2435 defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
2436 defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
2437 defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
2438 defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
2439 defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
2440 defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
2441 defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
2442 let isCodeGenOnly = 1 in {
2443 defm FCVTZS_Int : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", int_aarch64_neon_fcvtzs>;
2444 defm FCVTZU_Int : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", int_aarch64_neon_fcvtzu>;
2445 defm FCVTZS_Int : FPToIntegerScaled<0b11, 0b000, "fcvtzs", int_aarch64_neon_fcvtzs>;
2446 defm FCVTZU_Int : FPToIntegerScaled<0b11, 0b001, "fcvtzu", int_aarch64_neon_fcvtzu>;
2447 }
2449 multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
2450 def : Pat<(i32 (to_int (round f32:$Rn))),
2451 (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
2452 def : Pat<(i64 (to_int (round f32:$Rn))),
2453 (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
2454 def : Pat<(i32 (to_int (round f64:$Rn))),
2455 (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
2456 def : Pat<(i64 (to_int (round f64:$Rn))),
2457 (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
2458 }
2460 defm : FPToIntegerPats<fp_to_sint, fceil, "FCVTPS">;
2461 defm : FPToIntegerPats<fp_to_uint, fceil, "FCVTPU">;
2462 defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
2463 defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
2464 defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
2465 defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
2466 defm : FPToIntegerPats<fp_to_sint, frnd, "FCVTAS">;
2467 defm : FPToIntegerPats<fp_to_uint, frnd, "FCVTAU">;
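// The patterns above fold a rounding node into the conversion, e.g.
//   (i32 (fp_to_sint (ffloor f32:$Rn)))
// selects a single "fcvtms w0, s0" instead of frintm followed by fcvtzs.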
2469 //===----------------------------------------------------------------------===//
2470 // Scaled integer to floating point conversion instructions.
2471 //===----------------------------------------------------------------------===//
2473 defm SCVTF : IntegerToFP<0, "scvtf", sint_to_fp>;
2474 defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;
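// e.g. (f32 (sint_to_fp GPR32:$Rn)) selects "scvtf s0, w0"; the scaled
// variants additionally fold in a fixed-point #fbits scale.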
2476 //===----------------------------------------------------------------------===//
2477 // Unscaled integer to floating point conversion instruction.
2478 //===----------------------------------------------------------------------===//
2480 defm FMOV : UnscaledConversion<"fmov">;
2482 // Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
2483 let isReMaterializable = 1, isCodeGenOnly = 1 in {
2484 def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
2485 PseudoInstExpansion<(FMOVWSr FPR32:$Rd, WZR)>,
2486 Sched<[WriteF]>;
2487 def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
2488 PseudoInstExpansion<(FMOVXDr FPR64:$Rd, XZR)>,
2489 Sched<[WriteF]>;
2490 }
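// Marking these rematerializable lets the register allocator recreate an FP
// zero (e.g. "fmov s0, wzr") at each use instead of spilling and reloading
// it.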
2492 //===----------------------------------------------------------------------===//
2493 // Floating point conversion instruction.
2494 //===----------------------------------------------------------------------===//
2496 defm FCVT : FPConversion<"fcvt">;
2498 //===----------------------------------------------------------------------===//
2499 // Floating point single operand instructions.
2500 //===----------------------------------------------------------------------===//
2502 defm FABS : SingleOperandFPData<0b0001, "fabs", fabs>;
2503 defm FMOV : SingleOperandFPData<0b0000, "fmov">;
2504 defm FNEG : SingleOperandFPData<0b0010, "fneg", fneg>;
2505 defm FRINTA : SingleOperandFPData<0b1100, "frinta", frnd>;
2506 defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
2507 defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
2508 defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
2509 defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
2511 def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
2512 (FRINTNDr FPR64:$Rn)>;
2514 defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
2515 defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
2517 let SchedRW = [WriteFDiv] in {
2518 defm FSQRT : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
2519 }
2521 //===----------------------------------------------------------------------===//
2522 // Floating point two operand instructions.
2523 //===----------------------------------------------------------------------===//
2525 defm FADD : TwoOperandFPData<0b0010, "fadd", fadd>;
2526 let SchedRW = [WriteFDiv] in {
2527 defm FDIV : TwoOperandFPData<0b0001, "fdiv", fdiv>;
2528 }
2529 defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
2530 defm FMAX : TwoOperandFPData<0b0100, "fmax", fmaxnan>;
2531 defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
2532 defm FMIN : TwoOperandFPData<0b0101, "fmin", fminnan>;
2533 let SchedRW = [WriteFMul] in {
2534 defm FMUL : TwoOperandFPData<0b0000, "fmul", fmul>;
2535 defm FNMUL : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
2536 }
2537 defm FSUB : TwoOperandFPData<0b0011, "fsub", fsub>;
2539 def : Pat<(v1f64 (fmaxnan (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2540 (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
2541 def : Pat<(v1f64 (fminnan (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2542 (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
2543 def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2544 (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
2545 def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2546 (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
2548 //===----------------------------------------------------------------------===//
2549 // Floating point three operand instructions.
2550 //===----------------------------------------------------------------------===//
2552 defm FMADD : ThreeOperandFPData<0, 0, "fmadd", fma>;
2553 defm FMSUB : ThreeOperandFPData<0, 1, "fmsub",
2554 TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
2555 defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
2556 TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
2557 defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
2558 TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
2560 // The following def pats catch the case where the LHS of an FMA is negated.
2561 // The TriOpFrag above catches the case where the middle operand is negated.
2563 // N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
2564 // the NEON variant.
def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
// We handled -(a + b*c) for FNMADD above, now it's time for "(-a) + (-b)*c" and
// "(-a) + b*(-c)".
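// Both of those forms also simplify to -(a + b*c), i.e. FNMADD's semantics,
// so the stray negations fold away into the choice of opcode.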
def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe">;
defm FCMP  : FPComparison<0, "fcmp", AArch64fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//

defm FCCMPE : FPCondComparison<1, "fccmpe">;
defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
//===----------------------------------------------------------------------===//

defm FCSEL : FPCondSelect<"fcsel">;
// CSEL instructions providing f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
def F128CSEL : Pseudo<(outs FPR128:$Rd),
                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
                      [(set (f128 FPR128:$Rd),
                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
                                         (i32 imm:$cond), NZCV))]> {
  let Uses = [NZCV];
  let usesCustomInserter = 1;
}
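// Illustrative sketch only (the actual expansion is done in C++ by the
// custom inserter): conceptually the pseudo becomes
//
//   entry:     b.<cond> true_bb      ; NZCV was set by an earlier compare
//   false_bb:  result = $Rm
//   true_bb:   result = $Rn
//   merge:     $Rd = PHI(result from true_bb, result from false_bb)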
//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//

let isReMaterializable = 1 in {
defm FMOV : FPMoveImmediate<"fmov">;
}

//===----------------------------------------------------------------------===//
// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//
defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
                                          uabsdiff>;

// Match UABDL in log2-shuffle patterns.
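// The xor/add/ashr trees below are the usual DAG expansion of abs():
//   abs(x) == (x + (x >>s (bw-1))) ^ (x >>s (bw-1)),  bw = element bits,
// so |zext($opA) - zext($opB)| reaches instruction selection in this shape.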
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
                                (zext (v8i8 V64:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
                                (zext (extract_high_v16i8 V128:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
               (v4i32 (add (sub (zext (v4i16 V64:$opA)),
                                (zext (v4i16 V64:$opB))),
                           (AArch64vashr v4i32:$src, (i32 31))))),
          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
               (v4i32 (add (sub (zext (extract_high_v8i16 V128:$opA)),
                                (zext (extract_high_v8i16 V128:$opB))),
                           (AArch64vashr v4i32:$src, (i32 31))))),
          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
               (v2i64 (add (sub (zext (v2i32 V64:$opA)),
                                (zext (v2i32 V64:$opB))),
                           (AArch64vashr v2i64:$src, (i32 63))))),
          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
               (v2i64 (add (sub (zext (extract_high_v4i32 V128:$opA)),
                                (zext (extract_high_v4i32 V128:$opB))),
                           (AArch64vashr v2i64:$src, (i32 63))))),
          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;

defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", int_aarch64_neon_abs>;
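// The same expansion of abs() also appears for plain (non-widening) vector
// abs; the patterns below fold it back into the ABS instruction.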
def : Pat<(xor (v8i8 (AArch64vashr V64:$src, (i32 7))),
               (v8i8 (add V64:$src, (AArch64vashr V64:$src, (i32 7))))),
          (ABSv8i8 V64:$src)>;
def : Pat<(xor (v4i16 (AArch64vashr V64:$src, (i32 15))),
               (v4i16 (add V64:$src, (AArch64vashr V64:$src, (i32 15))))),
          (ABSv4i16 V64:$src)>;
def : Pat<(xor (v2i32 (AArch64vashr V64:$src, (i32 31))),
               (v2i32 (add V64:$src, (AArch64vashr V64:$src, (i32 31))))),
          (ABSv2i32 V64:$src)>;
def : Pat<(xor (v16i8 (AArch64vashr V128:$src, (i32 7))),
               (v16i8 (add V128:$src, (AArch64vashr V128:$src, (i32 7))))),
          (ABSv16i8 V128:$src)>;
def : Pat<(xor (v8i16 (AArch64vashr V128:$src, (i32 15))),
               (v8i16 (add V128:$src, (AArch64vashr V128:$src, (i32 15))))),
          (ABSv8i16 V128:$src)>;
def : Pat<(xor (v4i32 (AArch64vashr V128:$src, (i32 31))),
               (v4i32 (add V128:$src, (AArch64vashr V128:$src, (i32 31))))),
          (ABSv4i32 V128:$src)>;
def : Pat<(xor (v2i64 (AArch64vashr V128:$src, (i32 63))),
               (v2i64 (add V128:$src, (AArch64vashr V128:$src, (i32 63))))),
          (ABSv2i64 V128:$src)>;

defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;

defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
          (FCVTLv4i16 V64:$Rn)>;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
                                                                (i64 4)))),
          (FCVTLv8i16 V128:$Rn)>;
def : Pat<(v2f64 (fextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
def : Pat<(v2f64 (fextend (v2f32 (extract_subvector (v4f32 V128:$Rn),
                                                    (i64 2))))),
          (FCVTLv4i32 V128:$Rn)>;

def : Pat<(v4f32 (fextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
def : Pat<(v4f32 (fextend (v4f16 (extract_subvector (v8f16 V128:$Rn),
                                                    (i64 4))))),
          (FCVTLv8i16 V128:$Rn)>;
defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
          (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd,
                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
def : Pat<(v2f32 (fround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
def : Pat<(v4f16 (fround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd, (v2f32 (fround (v2f64 V128:$Rn)))),
          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
                                        int_aarch64_neon_fcvtxn>;
defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
let isCodeGenOnly = 1 in {
defm FCVTZS_Int : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs",
                                       int_aarch64_neon_fcvtzs>;
defm FCVTZU_Int : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu",
                                       int_aarch64_neon_fcvtzu>;
}
defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", frnd>;
defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
                                UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
// Aliases for MVN -> NOT.
def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
                (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
                (NOTv16i8 V128:$Vd, V128:$Vn)>;
def : Pat<(AArch64neg (v8i8  V64:$Rn)),  (NEGv8i8  V64:$Rn)>;
def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
def : Pat<(AArch64neg (v4i16 V64:$Rn)),  (NEGv4i16 V64:$Rn)>;
def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
def : Pat<(AArch64neg (v2i32 V64:$Rn)),  (NEGv2i32 V64:$Rn)>;
def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;

def : Pat<(AArch64not (v8i8 V64:$Rn)),   (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;

def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
                                int_aarch64_neon_uaddlp>;
defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
def : Pat<(v4f16 (AArch64rev32 V64:$Rn)),  (REV32v4i16 V64:$Rn)>;
def : Pat<(v4f16 (AArch64rev64 V64:$Rn)),  (REV64v4i16 V64:$Rn)>;
def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
def : Pat<(v2f32 (AArch64rev64 V64:$Rn)),  (REV64v2i32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext so it's easier to pull the patterns out of the
// definition.
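// All three extension kinds can share one instruction because the shift by
// the element width discards whatever the extension put in the top half:
// e.g. "shll v0.8h, v1.8b, #8" computes (zext|sext|anyext(v1.8b)) << 8.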
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}

defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD     : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP    : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ    : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE    : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT    : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI    : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS    : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST   : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD    : SIMDThreeSameVectorFP<1,1,0b11010,"fabd", int_aarch64_neon_fabd>;
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b11101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b11101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b11010,"faddp",int_aarch64_neon_addp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b11010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b11100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b11100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b11100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b11111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b11000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b11000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b11110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b11110,"fmax", fmaxnan>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b11000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b11000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b11110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b11110,"fmin", fminnan>;
// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
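// E.g. "fmla v0.4s, v1.4s, v2.4s" computes v0 += v1 * v2: the accumulator is
// the tied destination (node:$LHS of the frag), so it becomes fma's addend.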
defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b11001, "fmla",
       TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS : SIMDThreeSameVectorFPTied<0, 1, 0b11001, "fmls",
       TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.
def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
          (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
          (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
          (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;
defm FMULX    : SIMDThreeSameVectorFP<0,0,0b11011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b11011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b11111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b11111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b11010,"fsub", fsub>;
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla",
       TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))> >;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls",
       TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))> >;
defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
       TriOpFrag<(add node:$LHS, (sabsdiff node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", sabsdiff>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
       TriOpFrag<(add node:$LHS, (uabsdiff node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", uabsdiff>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_aarch64_neon_urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                  int_aarch64_neon_sqsub>;
defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
     BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
     TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
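// I.e. BSL computes $Rd = ($Rd & $Rn) | (~$Rd & $Rm): the tied operand is the
// bit mask that selects between the other two registers, bit by bit.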
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
     BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
def : Pat<(AArch64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD      : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ     : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE     : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT     : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI     : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS     : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST    : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD     : SIMDThreeScalarSD<1, 1, 0b11010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b11101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b11101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b11100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b11100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b11100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDThreeScalarSD<0, 0, 0b11011, "fmulx", int_aarch64_neon_fmulx>;
defm FRECPS   : SIMDThreeScalarSD<0, 0, 0b11111, "frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeScalarSD<0, 1, 0b11111, "frsqrts", int_aarch64_neon_frsqrts>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS<0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS<1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasV8_1a] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqadd
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqsub
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}
def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                      int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                        (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS     : SIMDTwoScalarD<0, 0b01011, "abs", int_aarch64_neon_abs>;
defm CMEQ    : SIMDCmpTwoScalarD<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE    : SIMDCmpTwoScalarD<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT    : SIMDCmpTwoScalarD<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE    : SIMDCmpTwoScalarD<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT    : SIMDCmpTwoScalarD<0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ   : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE   : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT   : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE   : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT   : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS  : SIMDFPTwoScalar<0, 0, 0b11100, "fcvtas">;
defm FCVTAU  : SIMDFPTwoScalar<1, 0, 0b11100, "fcvtau">;
defm FCVTMS  : SIMDFPTwoScalar<0, 0, 0b11011, "fcvtms">;
defm FCVTMU  : SIMDFPTwoScalar<1, 0, 0b11011, "fcvtmu">;
defm FCVTNS  : SIMDFPTwoScalar<0, 0, 0b11010, "fcvtns">;
defm FCVTNU  : SIMDFPTwoScalar<1, 0, 0b11010, "fcvtnu">;
defm FCVTPS  : SIMDFPTwoScalar<0, 1, 0b11010, "fcvtps">;
defm FCVTPU  : SIMDFPTwoScalar<1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS  : SIMDFPTwoScalar<0, 1, 0b11011, "fcvtzs">;
defm FCVTZU  : SIMDFPTwoScalar<1, 1, 0b11011, "fcvtzu">;
defm FRECPE  : SIMDFPTwoScalar<0, 1, 0b11101, "frecpe">;
defm FRECPX  : SIMDFPTwoScalar<0, 1, 0b11111, "frecpx">;
defm FRSQRTE : SIMDFPTwoScalar<1, 1, 0b11101, "frsqrte">;
defm NEG     : SIMDTwoScalarD<1, 0b01011, "neg",
                              UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF   : SIMDTwoScalarCVTSD<0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS   : SIMDTwoScalarBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG   : SIMDTwoScalarBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN   : SIMDTwoScalarMixedBHS<0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN  : SIMDTwoScalarMixedBHS<1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD  : SIMDTwoScalarBHSDTied<0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF   : SIMDTwoScalarCVTSD<1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN   : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD  : SIMDTwoScalarBHSDTied<1, 0b00011, "usqadd",
                                     int_aarch64_neon_usqadd>;
def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;

def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
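// E.g. for an 8-bit value this selects, schematically,
//   ldr  b0, [x0]        // load straight into the FP/SIMD register file
//   ucvtf s0, s0
// rather than "ldrb w8, [x0]; ucvtf s0, w8", avoiding a GPR-to-FPR transfer.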
// Here are the patterns for 8 and 16-bits to float.
// 8-bits -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
            (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                  (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                  sub))>;
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Xext:$extend))))),
            (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                  (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                  sub))>;
}
defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
              (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
              (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
              (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
              (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bit loads are handled in the target-specific DAG combine
// (performIntToFpCombine).
// A 64-bit integer to 32-bit floating point conversion is not possible with
// UCVTF on the floating-point registers (source and destination must have
// the same size).
// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
              (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
              (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
              (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
              (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
              (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
              (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bit integer -> double is handled in the target-specific DAG combine
// (performIntToFpCombine).
//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0, 0b0100, "addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0, 0b0110, "subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1, 0b0100, "raddhn", int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1, 0b0110, "rsubhn", int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0, 0b1110, "pmull", int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0, 0b0101, "sabal",
                                             sabsdiff>;
defm SABDL  : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                         sabsdiff>;
defm SADDL   : SIMDLongThreeVectorBHS<0, 0b0000, "saddl",
       BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW   : SIMDWideThreeVectorBHS<0, 0b0001, "saddw",
       BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL   : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
     TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL   : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
     TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL   : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL   : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
       BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW   : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
       BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL   : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                              uabsdiff>;
defm UADDL   : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
       BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
defm UADDW   : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
       BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
defm UMLAL   : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
     TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
     TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL   : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
       BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
defm USUBW   : SIMDWideThreeVectorBHS<1, 0b0011, "usubw",
       BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;
// Additional patterns for SMULL and UMULL.
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
                  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}
defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
                               SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
                               UMULLv4i16_v4i32, UMULLv2i32_v2i64>;
// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL.
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
                  Instruction INST8B, Instruction INST4H, Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V128:$Rd, V64:$Rn, V64:$Rm)>;
}
defm : Neon_mulacc_widen_patterns<
         TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
         SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
         TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
         UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
         TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
         SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
         TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
         UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
// Patterns for 64-bit pmull.
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (vector_extract (v2i64 V128:$Rn), (i64 1)),
                                    (vector_extract (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;
// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.
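// For example, "addhn v0.8b, v1.8h, v2.8h" roughly corresponds to the IR
//   %sum = add <8 x i16> %a, %b
//   %hi  = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8,
//                                i16 8, i16 8, i16 8, i16 8>
//   %res = trunc <8 x i16> %hi to <8 x i8>
// which is exactly the trunc-of-shifted-sum shape matched below.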
// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def : Pat<(v4i16 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
          (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
def : Pat<(v8i16 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
          (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
def : Pat<(v2i32 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
          (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
def : Pat<(v2f32 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
          (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
def : Pat<(v4i32 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
          (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
def : Pat<(v4f32 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
          (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
def : Pat<(v2i64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
          (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
def : Pat<(v2f64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
          (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
def : Pat<(v4f16 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
          (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
def : Pat<(v8f16 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
          (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
// We use EXT to handle extract_subvector to copy the upper 64-bits of a
// 128-bit vector.
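// E.g. "ext v0.16b, v1.16b, v1.16b, #8" rotates the register by 8 bytes, so
// the old upper half lands in the lower 64 bits, which dsub then extracts.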
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 8))),
          (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 4))),
          (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 2))),
          (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 1))),
          (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 4))),
          (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 2))),
          (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 1))),
          (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                                       (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                                        (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
//----------------------------------------------------------------------------
// AdvSIMD scalar CPY instruction
//----------------------------------------------------------------------------

defm CPY : SIMDScalarCPY<"cpy">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<1, 0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<1, 0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<1, 0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 1, 0b01111, "fminp">;
def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;
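// For example (a sketch of the mapping above): a two-lane reduction, roughly
// ACLE's vpadds_f32, selects the scalar pairwise form
//   faddp s0, v0.2s
// and the four-lane faddv case is split into a vector faddp followed by the
// scalar pair-add, as the FADDPv4f32 / FADDPv2i32p pattern shows.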
//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
          (v2f32 (DUPv2i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
          (v4f32 (DUPv4i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
          (v2f64 (DUPv2i64lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
          (v4f16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
          (v8f16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
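// For example, the duplane patterns above select the lane form of DUP, e.g.
//   (v4f32 (AArch64duplane32 (v4f32 Vn), 2))  ==>  dup v0.4s, v1.s[2]
// while the GPR forms earlier produce e.g.  dup v0.4s, w1.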
// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                        imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                        imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTruncPats<v8i8,  v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,  v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (vector_extract (v2i64 V128:$Rn),
                                                            imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (vector_extract (v1i64 V64:$Rn),
                                                            imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}

defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,  VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
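// Worked example of the lane remap (illustrative): duplicating the truncated
// i16 lane 3 of a v8i16 as v8i8 only needs the low byte of that lane, which
// lives in byte lane 3 * 2 == 6 of the same register; VecIndex_x2 performs
// that scaling, so
//   (v8i8 (AArch64dup (trunc (vector_extract (v8i16 Vn), 3))))
// selects  dup v0.8b, v0.b[6].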
// SMOV and UMOV definitions, with some extra patterns for convenience
defm SMOV : SMov;
defm UMOV : UMov;

def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
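// For example (illustrative): smov w0, v0.h[2] sign-extends lane 2 to 32 bits,
// smov x0, v0.h[2] sign-extends it to 64 bits, and umov w0, v0.h[2] moves the
// lane with the upper bits of w0 zeroed -- which is exactly what the 'and'
// mask patterns below rely on.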
// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization since neither i8 nor i16 are legal types
// for AArch64. Match these patterns here since UMOV already zeroes out the high
// bits of the destination register.
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               maski8_or_more),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               maski16_or_more),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
defm INS : SIMDIns;

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
          (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
          (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                (i32 FPR32:$Rn), ssub))>;
def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
          (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;

def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;

def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;
// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;
multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                     imm:$Immd, V128:$Rn, imm:$Immn),
                dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        imm:$Immd)),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
                dsub)>;
}
defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;

// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
// the lane number is anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;

def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which had just as well
// be INS.
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v16i8, v8i8>;
// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;
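// Illustrative lowering of the two cases above for v2f32 -> v4f32:
//   concat(lo, hi)    ==>  ins v0.d[1], v1.d[0]   (hi into the upper half)
//   concat(lo, undef) ==>  no instruction at all; lo already occupies the low
//                          64 bits of its 128-bit register, so a subreg insert
//                          suffices.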
//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDAcrossLanesS<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDAcrossLanesS<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDAcrossLanesS<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDAcrossLanesS<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

// Patterns for across-vector intrinsics that have a node equivalent returning
// a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
  def : Pat<(v8i8 (opNode V64:$Rn)),
            (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
  def : Pat<(v16i8 (opNode V128:$Rn)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
  def : Pat<(v4i16 (opNode V64:$Rn)),
            (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
  def : Pat<(v8i16 (opNode V128:$Rn)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
  def : Pat<(v4i32 (opNode V128:$Rn)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
             (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;

// If none did, fall back to the explicit patterns, consuming the
// vector_extract.
  def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i32 0)), (i64 0))),
            (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
              bsub), ssub)>;
  def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
            (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
              bsub), ssub)>;
  def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
            (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
              hsub), ssub)>;
  def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
            (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
              hsub), ssub)>;
  def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
            (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
              ssub), ssub)>;
}
multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it as smov
// already performed it.
  def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
        (i32 (SMOVvi8to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
          (i64 0)))>;
  def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
        (i32 (SMOVvi8to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
          (i64 0)))>;
  def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
          (i64 0)))>;
  def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
          (i64 0)))>;
}
multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has been actually
// generated, consume it.
  def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
          ssub))>;
  def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
          ssub))>;
  def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
          ssub))>;
  def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
        (i32 (EXTRACT_SUBREG
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
          ssub))>;
}
defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", AArch64saddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
          (SMINPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
          (UMINPv2i32 V64:$Rn, V64:$Rn)>;
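// For example, an 8-lane reduction such as (AArch64uaddv (v8i8 Vn)) selects
//   addv b0, v0.8b
// leaving the sum in lane 0 (the INSERT_SUBREG in the multiclass merely ties
// the scalar result back into a vector register). The 2-lane v2i32 case has
// no ADDV encoding, hence the ADDP with both operands equal above.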
multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
          (i64 0)))>;
  def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
        (i32 (SMOVvi16to32
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
          (i64 0)))>;

  def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
           ssub))>;
  def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
           ssub))>;

  def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
          (i64 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
           dsub))>;
}
multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
                                                Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
           ssub))>;
  def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
           ssub))>;

  def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
           ssub))>;
  def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
          (i32 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
           ssub))>;

  def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
          (i64 (EXTRACT_SUBREG
           (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
           dsub))>;
}
defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

// The vaddlv_s32 intrinsic gets mapped to SADDLP.
def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
// The vaddlv_u32 intrinsic gets mapped to UADDLP.
def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
//------------------------------------------------------------------------------
// AdvSIMD modified immediate instructions
//------------------------------------------------------------------------------

defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0b1111, V128, fpimm8,
                                                "fmov", ".2d",
                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0b1111, V64, fpimm8,
                                                "fmov", ".2s",
                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0b1111, V128, fpimm8,
                                                "fmov", ".4s",
                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;

// AdvSIMD MOVI
// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                                         [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;

def : Pat<(v1i64 immAllZerosV), (MOVID (i32 0))>;
def : Pat<(v2i32 immAllZerosV), (MOVID (i32 0))>;
def : Pat<(v4i16 immAllZerosV), (MOVID (i32 0))>;
def : Pat<(v8i8  immAllZerosV), (MOVID (i32 0))>;

def : Pat<(v1i64 immAllOnesV), (MOVID (i32 255))>;
def : Pat<(v2i32 immAllOnesV), (MOVID (i32 255))>;
def : Pat<(v4i16 immAllOnesV), (MOVID (i32 255))>;
def : Pat<(v8i8  immAllOnesV), (MOVID (i32 255))>;
// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0b1110, V128,
                                              simdimmtype10,
                                              "movi", ".2d",
                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
// Use movi.2d to materialize 0.0 if the HW does zero-cycle zeroing.
// Complexity is added to break a tie with a plain MOVI.
let AddedComplexity = 1 in {
def : Pat<(f32 fpimm0),
          (f32 (EXTRACT_SUBREG (v2i64 (MOVIv2d_ns (i32 0))), ssub))>,
      Requires<[HasZCZ]>;
def : Pat<(f64 fpimm0),
          (f64 (EXTRACT_SUBREG (v2i64 (MOVIv2d_ns (i32 0))), dsub))>,
      Requires<[HasZCZ]>;
}
def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;

def : Pat<(v2f64 (AArch64dup (f64 fpimm0))), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4f32 (AArch64dup (f32 fpimm0))), (MOVIv2d_ns (i32 0))>;
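// For example: the type-10 "byte mask" immediate expands each of the 8 bits
// of imm8 to a 0x00/0xff byte of a 64-bit pattern replicated to both lanes,
// so (MOVIv2d_ns (i32 255)) assembles as
//   movi v0.2d, #0xffffffffffffffff
// and imm8 == 0 yields the all-zeros vector used by the fpimm0 patterns above.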
// EDIT per word & halfword: 2s, 4h, 4s, & 8h
defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns  : SIMDModifiedImmVectorNoShift<0, 0, 0b1110, V64, imm0_255,
                                               "movi", ".8b",
                      [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0b1110, V128, imm0_255,
                                               "movi", ".16b",
                      [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;

// AdvSIMD MVNI
// EDIT per word & halfword: 2s, 4h, 4s, & 8h
defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}
// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
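// A worked instance of the reordering (illustrative): the IR-level call
//   %r = call float @llvm.fma.f32(float %m1, float %m2, float %acc)
// has the addend last, while  fmla v0.4s, v1.4s, v2.s[1]  has it first (in
// the tied destination), so each TriOpFrag above rotates node:$LHS (the
// addend) into the fma's last position; the four FMLS variants cover every
// placement of the fneg allowed by (-x) * y == x * (-y).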
multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                             VectorIndexD:$idx))),
            (FMLSv2i64_indexed
              V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (insert_subvector undef,
                                                    (v2f32 (fneg V64:$Rm)),
                                                    (i32 0))),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;

  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
                         (vector_extract (v2f64 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
}
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;

defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;
def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;
defm SQDMULH  : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm MLA      : SIMDVectorIndexedHSTied<1, 0b0000, "mla",
                  TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))>>;
defm MLS      : SIMDVectorIndexedHSTied<1, 0b0100, "mls",
                  TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))>>;
defm MUL      : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
defm SMLAL    : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL    : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL    : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
                  int_aarch64_neon_smull>;
defm SQDMLAL  : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
                                            int_aarch64_neon_sqadd>;
defm SQDMLSL  : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
                                            int_aarch64_neon_sqsub>;
defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
                                          int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
                                          int_aarch64_neon_sqsub>;
defm SQDMULL  : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
defm UMLAL    : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL    : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL    : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
                  int_aarch64_neon_umull>;

// A scalar sqdmull with the second operand being a vector lane can be
// handled directly with the indexed instruction encoding.
def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                            (vector_extract (v4i32 V128:$Vm),
                                                            VectorIndexS:$idx)),
          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
//----------------------------------------------------------------------------
// AdvSIMD scalar shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDScalarRShiftSD<0, 0b11111, "fcvtzs">;
defm FCVTZU : SIMDScalarRShiftSD<1, 0b11111, "fcvtzu">;
defm SCVTF  : SIMDScalarRShiftSD<0, 0b11100, "scvtf">;
defm UCVTF  : SIMDScalarRShiftSD<1, 0b11100, "ucvtf">;
// Codegen patterns for the above. We don't put these directly on the
// instructions because TableGen's type inference can't handle the truth.
// Having the same base pattern for fp <--> int totally freaks it out.
def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
defm SHL      : SIMDScalarLShiftD<    0, 0b01010, "shl", AArch64vshl>;
defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
defm SQRSHRN  : SIMDScalarRShiftBHS<  0, 0b10011, "sqrshrn",
                                      int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDScalarRShiftBHS<  1, 0b10001, "sqrshrun",
                                      int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDScalarRShiftBHS<  0, 0b10010, "sqshrn",
                                      int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDScalarRShiftBHS<  1, 0b10000, "sqshrun",
                                      int_aarch64_neon_sqshrun>;
defm SRI      : SIMDScalarRShiftDTied<1, 0b01000, "sri">;
defm SRSHR    : SIMDScalarRShiftD<    0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDScalarRShiftDTied<0, 0b00110, "srsra",
                    TriOpFrag<(add node:$LHS,
                                   (AArch64srshri node:$MHS, node:$RHS))>>;
defm SSHR     : SIMDScalarRShiftD<    0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDScalarRShiftDTied<0, 0b00010, "ssra",
                    TriOpFrag<(add node:$LHS,
                                   (AArch64vashr node:$MHS, node:$RHS))>>;
defm UQRSHRN  : SIMDScalarRShiftBHS<  1, 0b10011, "uqrshrn",
                                      int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDScalarRShiftBHS<  1, 0b10010, "uqshrn",
                                      int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDScalarRShiftD<    1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDScalarRShiftDTied<1, 0b00110, "ursra",
                    TriOpFrag<(add node:$LHS,
                                   (AArch64urshri node:$MHS, node:$RHS))>>;
defm USHR     : SIMDScalarRShiftD<    1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDScalarRShiftDTied<1, 0b00010, "usra",
                    TriOpFrag<(add node:$LHS,
                                   (AArch64vlshr node:$MHS, node:$RHS))>>;
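// The TriOpFrags above fold a shift into an accumulate, e.g. (illustrative)
//   %s = lshr i64 %b, 3
//   %r = add i64 %a, %s
// becomes a single  usra d0, d1, #3  rather than a ushr followed by an add.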
//----------------------------------------------------------------------------
// AdvSIMD vector shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
defm FCVTZU : SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
defm SCVTF  : SIMDVectorRShiftSDToFP<0, 0b11100, "scvtf",
                                     int_aarch64_neon_vcvtfxs2fp>;
defm RSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
                                        int_aarch64_neon_rshrn>;
defm SHL    : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
defm SHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
                  BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
defm SLI    : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_aarch64_neon_vsli>;
def : Pat<(v1i64 (int_aarch64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                        (i32 vecshiftL64:$imm))),
          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
defm SQRSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
                                          int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
                                          int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
                                          int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
                                          int_aarch64_neon_sqshrun>;
defm SRI      : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_aarch64_neon_vsri>;
def : Pat<(v1i64 (int_aarch64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                        (i32 vecshiftR64:$imm))),
          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
defm SRSHR    : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
                    TriOpFrag<(add node:$LHS,
                                   (AArch64srshri node:$MHS, node:$RHS))> >;
defm SSHLL    : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
                    BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;

defm SSHR     : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
                    TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF    : SIMDVectorRShiftSDToFP<1, 0b11100, "ucvtf",
                                       int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
                                          int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
                                          int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
                    TriOpFrag<(add node:$LHS,
                                   (AArch64urshri node:$MHS, node:$RHS))> >;
defm USHLL    : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
                    BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR     : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
                    TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result so it
// doesn't matter).
def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;

def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
                                                      vecshiftR16Narrow:$imm)))),
          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
                                                      vecshiftR32Narrow:$imm)))),
          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
                                                      vecshiftR64Narrow:$imm)))),
          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR64Narrow:$imm)>;
// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128-bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
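// For example, a zero-extend of the high half selects the "2" form of the
// instruction (illustrative):
//   (v8i16 (zext (extract_subvector (v16i8 Vn), 8)))
//     ==>  ushll2 v0.8h, v0.16b, #0
// which is also what the uxtl2 aliases below expand to.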
// Vector shift sxtl aliases
def : InstAlias<"sxtl.8h $dst, $src1",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.8h, $src1.8b",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.4s $dst, $src1",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.4s, $src1.4h",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.2d $dst, $src1",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.2d, $src1.2s",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift sxtl2 aliases
def : InstAlias<"sxtl2.8h $dst, $src1",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.4s $dst, $src1",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.2d $dst, $src1",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// Vector shift uxtl aliases
def : InstAlias<"uxtl.8h $dst, $src1",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.8h, $src1.8b",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.4s $dst, $src1",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.4s, $src1.4h",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.2d $dst, $src1",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.2d, $src1.2s",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift uxtl2 aliases
def : InstAlias<"uxtl2.8h $dst, $src1",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.4s $dst, $src1",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.2d $dst, $src1",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// These patterns are more complex because floating point loads do not
// support sign extension.
// The sign extension has to be explicitly added and is only supported for
// one step: byte-to-half, half-to-word, word-to-doubleword.
// SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
// However, this is not good for code size.
// 8-bits -> float. 2 sizes step-up.
class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv8i8_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        bsub),
                                    0),
                                  dsub)),
                              0),
                            ssub)))>, Requires<[NotForCodeSize, IsCyclone]>;

def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
                          (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
                          (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;
// 16-bits -> float. 1 size step-up.
class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  hsub),
                              0),
                            ssub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bits to 32-bits are handled in target specific dag combine:
// performIntToFpCombine.
// 64-bits integer to 32-bits floating point, not possible with
// SCVTF on floating point registers (both source and destination
// must have the same size).

// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double. 3 size step-up: give up.
// 16-bits -> double. 2 size step.
class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
         (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                             (SSHLLv2i32_shift
                                (f64
                                  (EXTRACT_SUBREG
                                    (SSHLLv4i16_shift
                                      (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        hsub),
                                      0),
                                    dsub)),
                               0),
                             dsub)))>, Requires<[NotForCodeSize, IsCyclone]>;

def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bits -> double. 1 size step-up.
class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
         (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                             (SSHLLv2i32_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  ssub),
                               0),
                             dsub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;
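// Putting the cycle counts above together, the i16 -> f64 case ends up as
// roughly (for illustration):
//   ldr h0, [x0]            ; load straight onto the FP unit
//   sshll v0.4s, v0.4h, #0  ; 1st explicit sign-extension step
//   sshll v0.2d, v0.2s, #0  ; 2nd step, hence the "2 size step" note
//   scvtf d0, d0            ; FPR -> FPR convert
// instead of an ldrsh plus the slower GPR -> FPR scvtf d0, w1.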
// 64-bits -> double are handled in target specific dag combine:
// performIntToFpCombine.

//----------------------------------------------------------------------------
// AdvSIMD Load-Store Structure
//----------------------------------------------------------------------------
defm LD1 : SIMDLd1Multiple<"ld1">;
defm LD2 : SIMDLd2Multiple<"ld2">;
defm LD3 : SIMDLd3Multiple<"ld3">;
defm LD4 : SIMDLd4Multiple<"ld4">;

defm ST1 : SIMDSt1Multiple<"st1">;
defm ST2 : SIMDSt2Multiple<"st2">;
defm ST3 : SIMDSt3Multiple<"st3">;
defm ST4 : SIMDSt4Multiple<"st4">;

class Ld1Pat<ValueType ty, Instruction INST>
  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;

def : Ld1Pat<v16i8, LD1Onev16b>;
def : Ld1Pat<v8i16, LD1Onev8h>;
def : Ld1Pat<v4i32, LD1Onev4s>;
def : Ld1Pat<v2i64, LD1Onev2d>;
def : Ld1Pat<v8i8,  LD1Onev8b>;
def : Ld1Pat<v4i16, LD1Onev4h>;
def : Ld1Pat<v2i32, LD1Onev2s>;
def : Ld1Pat<v1i64, LD1Onev1d>;

class St1Pat<ValueType ty, Instruction INST>
  : Pat<(store ty:$Vt, GPR64sp:$Rn),
        (INST ty:$Vt, GPR64sp:$Rn)>;

def : St1Pat<v16i8, ST1Onev16b>;
def : St1Pat<v8i16, ST1Onev8h>;
def : St1Pat<v4i32, ST1Onev4s>;
def : St1Pat<v2i64, ST1Onev2d>;
def : St1Pat<v8i8,  ST1Onev8b>;
def : St1Pat<v4i16, ST1Onev4h>;
def : St1Pat<v2i32, ST1Onev2s>;
def : St1Pat<v1i64, ST1Onev1d>;
defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
let mayLoad = 1, hasSideEffects = 0 in {
defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
}
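// Match a splat (AArch64dup) of a scalar load with the replicating load LD1R,
// which loads a single element and broadcasts it to every lane.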
def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv8b GPR64sp:$Rn)>;
def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv16b GPR64sp:$Rn)>;
def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
// Grab the floating point version too
def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;

def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
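// The single-lane LD1 instructions operate on 128-bit registers only, so for
// 64-bit vectors the tied operand is widened with SUBREG_TO_REG and the
// loaded result is narrowed back with EXTRACT_SUBREG.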
class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne64:$Rd),
                       (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (EXTRACT_SUBREG
            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
                 VecIndex:$idx, GPR64sp:$Rn),
            dsub)>;

def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
defm LD4 : SIMDLdSt4SingleAliases<"ld4">;

defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8,  i32, ST1i8>;
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
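// Post-increment single-lane stores come in two forms: an immediate increment
// equal to the transfer size (matched below with XZR as the offset register)
// and a general register increment (matched with $Rm).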
multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                            ValueType VTy, ValueType STy, Instruction ST1,
                            int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
            (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
                 VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
            (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
                 VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
                        2>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
            (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
            (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
                         1>;
defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
                         2>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
}

defm ST1 : SIMDLdSt1SingleAliases<"st1">;
defm ST2 : SIMDLdSt2SingleAliases<"st2">;
defm ST3 : SIMDLdSt3SingleAliases<"st3">;
defm ST4 : SIMDLdSt4SingleAliases<"st4">;
//----------------------------------------------------------------------------
// Crypto extensions
//----------------------------------------------------------------------------

def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;

def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2",
                                  int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr : SHATiedInstVVV<0b110, "sha256su1",
                                  int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",    int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",  int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",
                                int_aarch64_crypto_sha256su0>;
//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.

// Any instruction that writes a 32-bit result zero-extends into the high half
// of the 64-bit register, with two exceptions: a truncate may be lowered to an
// EXTRACT_SUBREG, and a CopyFromReg may be copying from a truncate, so neither
// of those guarantees anything about the upper 32 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
def def32 : PatLeaf<(i32 GPR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg;
}]>;
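// For example, a 32-bit ADD feeding a zext satisfies def32, so
// (i64 (zext (add ...))) should need no instruction beyond the ADD itself.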
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;

// For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
// then assert the extension has happened.
def : Pat<(i64 (zext GPR32:$src)),
          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
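// e.g. this should lower (i64 (zext w0)) to "mov w0, w0"; a 32-bit ORR with
// WZR is architecturally guaranteed to clear bits [63:32].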
// To sign extend, we use a signed bitfield move instruction (SBFM) on the
// containing super-reg.
def : Pat<(i64 (sext GPR32:$src)),
   (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
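// The shifted-sext patterns below fold the extend and the shift into one
// bitfield move; e.g. (shl (sext_inreg x, i8), 3) should become
// "sbfiz w0, w0, #3, #8" rather than a sxtb/lsl pair.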
def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i8 imm0_63:$imm)))>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i16 imm0_63:$imm)))>;

def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
let AddedComplexity = 20 in {
// We support all sext + sra combinations which preserve at least one bit of the
// original value which is to be sign extended. E.g. we support shifts up to
// bitwidth-1 bits.
def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;

def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;

def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 imm0_31:$imm), 31)>;
} // AddedComplexity = 20
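// e.g. (sra (sext_inreg x, i16), 7) should now match as the single
// instruction "sbfx w0, w0, #7, #9" instead of a sxth/asr pair.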
// To truncate, we can simply extract from a subregister.
def : Pat<(i32 (trunc GPR64sp:$src)),
          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;

// __builtin_trap() uses the BRK instruction on AArch64.
def : Pat<(trap), (BRK 1)>;
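// This emits "brk #0x1".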
// Conversions within AdvSIMD types in the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
//   v0 = load v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//        store v4i16 v1
//
// In big endian mode every memory access has an implicit byte swap. LDR and
// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
// is, they treat the vector as a sequence of elements to be byte-swapped.
// The two pairs of instructions are fundamentally incompatible. We've decided
// to use LD1/ST1 only to simplify compiler implementation.
//
// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
// the original code sequence:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = BITCAST v2i32 v1 to v4i16
//   v3 = REV v4i16 v2               (implicit)
//        store v4i16 v3
//
// But this is now broken - the value stored is different to the value loaded
// due to lane reordering. To fix this, on every BITCAST we must perform two
// other REVs:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = REV v2i32
//   v3 = BITCAST v2i32 v2 to v4i16
//   v4 = REV v4i16 v3
//   v5 = REV v4i16 v4               (implicit)
//        store v4i16 v5
//
// This means an extra two instructions, but actually in most cases the two REV
// instructions can be combined into one. For example:
//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
//
// There is also no 128-bit REV instruction. This must be synthesized with an
// EXT instruction.
//
// Most bitconverts require some sort of conversion. The only exceptions are:
//   a) Identity conversions -  vNfX <-> vNiX
//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
//
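// The IsBE bitconvert patterns below implement this scheme; for example, a
// v2i32 -> v4i16 bitconvert on big endian is emitted as a single REV32v4i16.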
// Natural vector casts (64 bit)
def : Pat<(v8i8  (AArch64NvCast (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v8i8 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (f64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
// Natural vector casts (128 bit)
def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8 (bitconvert GPR64:$Xn)),
          (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
          (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
          (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
          (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
          (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
          (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
          (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
          (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64  FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
          (v2i32 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
          (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64  FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
}
let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))),
          (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64  FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
}
let Predicates = [IsLE] in {
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))), (v8i8 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
          (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
          (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
}
let Predicates = [IsLE] in {
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v8i8  FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))), (f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
          (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
          (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
          (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
          (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
          (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
          (v2f32 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                          (REV64v16i8 FPR128:$src), (i32 8)))>;
}
let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
          (v2f64 (EXTv16i8 FPR128:$src,
                           FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
          (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
          (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
          (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
          (v2i64 (EXTv16i8 FPR128:$src,
                           FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
          (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
          (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
          (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
          (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
          (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
}
let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))),
          (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
          (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
          (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
          (v8f16 (REV32v8i16 FPR128:$src))>;
}
let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
          (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                           (REV64v16i8 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
}
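// Extracting the low half of a 128-bit register is just a dsub subregister
// copy, while extracting the high half needs a lane duplicate first (see the
// DUPv2i64lane patterns further below).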
def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (i32 0)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
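// e.g. widening a v2f32 value held in a D register to an undef-high v4f32
// should be resolved by the register allocator as a plain subregister
// assignment, with no copy emitted.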
// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// and v4f32.
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
          (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
          (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
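// e.g. summing both i64 lanes should yield "addp d0, v0.2d", and both f64
// lanes "faddp d0, v0.2d", with no cross-register-file moves.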
// Scalar 64-bit shifts in FPR64 registers.
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
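// e.g. int_aarch64_neon_sshl on two scalars already held in FPR64 registers
// should emit "sshl d0, d0, d1" without bouncing through the integer
// registers.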
// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
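// For example, a v2i64 nontemporal store should lower to roughly:
//   mov  d1, v0.d[1]
//   stnp d0, d1, [x0]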
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (CPYi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 0, 31), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity = 15
} // Predicates = [IsLE]
// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi tglobaladdr:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
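// Late in the backend these pseudos should become a plain "b" (TCRETURNdi) or
// "br" (TCRETURNri), so the callee returns directly to our caller.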
include "AArch64InstrAtomics.td"