1 //===- ARMInstrVFP.td - VFP support for ARM -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM VFP instruction set.
12 //===----------------------------------------------------------------------===//
// SelectionDAG type profiles and target-specific (ARMISD) node definitions
// used by the VFP instruction patterns below.
// NOTE(review): the 'def SDT_* :' header lines for the four SDTypeProfile
// bodies below are missing from this excerpt — confirm against full file.
15 SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
17 SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
19 SDTypeProfile<0, 1, [SDTCisFP<0>]>;
21 SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
// FP -> unsigned/signed int and signed/unsigned int -> FP conversion nodes.
24 def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
25 def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
26 def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
27 def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
// FMSTAT transfers the FP status flags (consumes and produces a flag);
// CMPFP/CMPFPw0 compare two FP registers, or one register against zero,
// producing a flag result.
28 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag,SDNPOutFlag]>;
29 def arm_cmpfp  : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
30 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0",SDT_CMPFP0, [SDNPOutFlag]>;
// VMOVDRR builds an f64 from a pair of i32 GPR values.
31 def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
33 //===----------------------------------------------------------------------===//
34 // Operand Definitions.
// FP immediate operands, valid only when the value is encodable in the
// VFP3 8-bit immediate format (getVFPf*Imm returns -1 when not encodable).
// NOTE(review): the '}]>' PatLeaf terminators and closing braces of these
// two defs are missing from this excerpt.
38 def vfp_f32imm : Operand<f32>,
39                  PatLeaf<(f32 fpimm), [{
40       return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
42   let PrintMethod = "printVFPf32ImmOperand";
45 def vfp_f64imm : Operand<f64>,
46                  PatLeaf<(f64 fpimm), [{
47       return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
49   let PrintMethod = "printVFPf64ImmOperand";
53 //===----------------------------------------------------------------------===//
54 // Load / store Instructions.
// Single-register FP load/store. Loads may be folded into uses and
// rematerialized by the register allocator.
// NOTE(review): the closing '}' of this 'let ... in {' region is missing
// from this excerpt.
57 let canFoldAsLoad = 1, isReMaterializable = 1 in {
// VLDRD: load one f64 from an addrmode5 (base + scaled 8-bit offset) address.
58 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
59                  IIC_fpLoad64, "vldr", ".64\t$dst, $addr",
60                  [(set DPR:$dst, (f64 (load addrmode5:$addr)))]>;
// VLDRS: load one f32.
62 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr),
63                  IIC_fpLoad32, "vldr", ".32\t$dst, $addr",
64                  [(set SPR:$dst, (load addrmode5:$addr))]>;
// VSTRD/VSTRS: the corresponding f64/f32 stores.
67 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
68                  IIC_fpStore64, "vstr", ".64\t$src, $addr",
69                  [(store (f64 DPR:$src), addrmode5:$addr)]>;
71 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$src, addrmode5:$addr),
72                  IIC_fpStore32, "vstr", ".32\t$src, $addr",
73                  [(store SPR:$src, addrmode5:$addr)]>;
75 //===----------------------------------------------------------------------===//
76 // Load / store multiple Instructions.
// Load-multiple: no patterns (selected manually), so mark mayLoad and flag
// the variable register list for the register allocator.
// NOTE(review): the 'let Inst{...}' encoding lines and closing braces of
// each def below are missing from this excerpt.
79 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// VLDMD/VLDMS: load multiple D/S registers, no base-register writeback.
80 def VLDMD : AXDI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$dsts,
81                            variable_ops), IndexModeNone, IIC_fpLoad_m,
82                   "vldm${addr:submode}${p}\t$addr, $dsts", "", []> {
86 def VLDMS : AXSI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$dsts,
87                            variable_ops), IndexModeNone, IIC_fpLoad_m,
88                   "vldm${addr:submode}${p}\t$addr, $dsts", "", []> {
// _UPD variants write the updated base address back through $wb
// (tied to $addr.addr).
92 def VLDMD_UPD : AXDI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
93                                         reglist:$dsts, variable_ops),
94                       IndexModeUpd, IIC_fpLoad_mu,
95                       "vldm${addr:submode}${p}\t$addr!, $dsts",
96                       "$addr.addr = $wb", []> {
100 def VLDMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
101                                         reglist:$dsts, variable_ops),
102                       IndexModeUpd, IIC_fpLoad_mu,
103                       "vldm${addr:submode}${p}\t$addr!, $dsts",
104                       "$addr.addr = $wb", []> {
107 } // mayLoad, neverHasSideEffects, hasExtraDefRegAllocReq
// Store-multiple: mirrors the VLDM block above, with hasExtraSrcRegAllocReq
// marking the variable source register list.
// NOTE(review): encoding 'let' lines and per-def closing braces are missing
// from this excerpt.
109 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
// VSTMD/VSTMS: store multiple D/S registers, no writeback.
110 def VSTMD : AXDI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$srcs,
111                            variable_ops), IndexModeNone, IIC_fpStore_m,
112                   "vstm${addr:submode}${p}\t$addr, $srcs", "", []> {
116 def VSTMS : AXSI4<(outs), (ins addrmode4:$addr, pred:$p, reglist:$srcs,
117                            variable_ops), IndexModeNone, IIC_fpStore_m,
118                   "vstm${addr:submode}${p}\t$addr, $srcs", "", []> {
// _UPD variants write the updated base address back through $wb.
122 def VSTMD_UPD : AXDI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
123                                         reglist:$srcs, variable_ops),
124                       IndexModeUpd, IIC_fpStore_mu,
125                       "vstm${addr:submode}${p}\t$addr!, $srcs",
126                       "$addr.addr = $wb", []> {
130 def VSTMS_UPD : AXSI4<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
131                                         reglist:$srcs, variable_ops),
132                       IndexModeUpd, IIC_fpStore_mu,
133                       "vstm${addr:submode}${p}\t$addr!, $srcs",
134                       "$addr.addr = $wb", []> {
137 } // mayStore, neverHasSideEffects, hasExtraSrcRegAllocReq
139 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
141 //===----------------------------------------------------------------------===//
142 // FP Binary Operations.
// VADDD: f64 add, with explicit register-field encodings. D registers use
// a 5-bit number: bits 3-0 in the main field plus a high bit (Dd{4} -> bit
// 22). NOTE(review): the 'bits<5>' declarations and the high-bit lines for
// Dn/Dm appear to be missing from this excerpt.
145 def VADDD  : ADbI<0b11100, 0b11, 0, 0, (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
146                   IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
147                   [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]> {
152   let Inst{3-0}   = Dm{3-0};
154   let Inst{19-16} = Dn{3-0};
156   let Inst{15-12} = Dd{3-0};
157   let Inst{22}    = Dd{4};
// VADDS: f32 add. S registers encode the upper 4 bits (Sd{4-1}) in the main
// field and the low bit (Sd{0}) separately (here bit 22 for Sd).
160 def VADDS  : ASbIn<0b11100, 0b11, 0, 0, (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
161                    IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
162                    [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]> {
167   let Inst{3-0}   = Sm{4-1};
169   let Inst{19-16} = Sn{4-1};
171   let Inst{15-12} = Sd{4-1};
172   let Inst{22}    = Sd{0};
175 // These are encoded as unary instructions.
// Register-register FP compares; all write the FPSCR condition flags.
// The 'E' variants raise an exception on a quiet NaN operand and are the
// ones used for codegen; the non-E forms exist for disassembly only.
// NOTE(review): the closing '}' of this 'let Defs' region is missing from
// this excerpt.
176 let Defs = [FPSCR] in {
177 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins DPR:$a, DPR:$b),
178                   IIC_fpCMP64, "vcmpe", ".f64\t$a, $b",
179                   [(arm_cmpfp DPR:$a, (f64 DPR:$b))]>;
181 def VCMPD  : ADuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins DPR:$a, DPR:$b),
182                   IIC_fpCMP64, "vcmp", ".f64\t$a, $b",
183                   [/* For disassembly only; pattern left blank */]>;
185 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins SPR:$a, SPR:$b),
186                   IIC_fpCMP32, "vcmpe", ".f32\t$a, $b",
187                   [(arm_cmpfp SPR:$a, SPR:$b)]>;
189 def VCMPS  : ASuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins SPR:$a, SPR:$b),
190                   IIC_fpCMP32, "vcmp", ".f32\t$a, $b",
191                   [/* For disassembly only; pattern left blank */]>;
// FP divide, multiply, negated multiply, and subtract, in f64/f32 pairs.
194 def VDIVD  : ADbI<0b11101, 0b00, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
195                   IIC_fpDIV64, "vdiv", ".f64\t$dst, $a, $b",
196                   [(set DPR:$dst, (fdiv DPR:$a, (f64 DPR:$b)))]>;
198 def VDIVS  : ASbI<0b11101, 0b00, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
199                   IIC_fpDIV32, "vdiv", ".f32\t$dst, $a, $b",
200                   [(set SPR:$dst, (fdiv SPR:$a, SPR:$b))]>;
202 def VMULD  : ADbI<0b11100, 0b10, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
203                   IIC_fpMUL64, "vmul", ".f64\t$dst, $a, $b",
204                   [(set DPR:$dst, (fmul DPR:$a, (f64 DPR:$b)))]>;
206 def VMULS  : ASbIn<0b11100, 0b10, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
207                    IIC_fpMUL32, "vmul", ".f32\t$dst, $a, $b",
208                    [(set SPR:$dst, (fmul SPR:$a, SPR:$b))]>;
// VNMUL computes -(a * b) in one instruction.
210 def VNMULD : ADbI<0b11100, 0b10, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
211                   IIC_fpMUL64, "vnmul", ".f64\t$dst, $a, $b",
212                   [(set DPR:$dst, (fneg (fmul DPR:$a, (f64 DPR:$b))))]>;
214 def VNMULS : ASbI<0b11100, 0b10, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
215                   IIC_fpMUL32, "vnmul", ".f32\t$dst, $a, $b",
216                   [(set SPR:$dst, (fneg (fmul SPR:$a, SPR:$b)))]>;
// Match reassociated forms only if not sign dependent rounding.
// ((-a) * b is only the same as -(a * b) when rounding is sign-independent.)
219 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
220           (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
221 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
222           (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
225 def VSUBD  : ADbI<0b11100, 0b11, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
226                   IIC_fpALU64, "vsub", ".f64\t$dst, $a, $b",
227                   [(set DPR:$dst, (fsub DPR:$a, (f64 DPR:$b)))]>;
229 def VSUBS  : ASbIn<0b11100, 0b11, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
230                    IIC_fpALU32, "vsub", ".f32\t$dst, $a, $b",
231                    [(set SPR:$dst, (fsub SPR:$a, SPR:$b))]>;
233 //===----------------------------------------------------------------------===//
234 // FP Unary Operations.
// Absolute value (f64/f32).
237 def VABSD  : ADuI<0b11101, 0b11, 0b0000, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
238                   IIC_fpUNA64, "vabs", ".f64\t$dst, $a",
239                   [(set DPR:$dst, (fabs (f64 DPR:$a)))]>;
241 def VABSS  : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,(outs SPR:$dst), (ins SPR:$a),
242                    IIC_fpUNA32, "vabs", ".f32\t$dst, $a",
243                    [(set SPR:$dst, (fabs SPR:$a))]>;
// Compares against #0; same E / non-E split as the register compares above.
// All write FPSCR. NOTE(review): the closing '}' of this 'let Defs' region
// is missing from this excerpt.
245 let Defs = [FPSCR] in {
246 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins DPR:$a),
247                    IIC_fpCMP64, "vcmpe", ".f64\t$a, #0",
248                    [(arm_cmpfp0 (f64 DPR:$a))]>;
250 def VCMPZD  : ADuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins DPR:$a),
251                    IIC_fpCMP64, "vcmp", ".f64\t$a, #0",
252                    [/* For disassembly only; pattern left blank */]>;
254 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins SPR:$a),
255                    IIC_fpCMP32, "vcmpe", ".f32\t$a, #0",
256                    [(arm_cmpfp0 SPR:$a)]>;
258 def VCMPZS  : ASuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins SPR:$a),
259                    IIC_fpCMP32, "vcmp", ".f32\t$a, #0",
260                    [/* For disassembly only; pattern left blank */]>;
// VCVTDS: widen f32 -> f64.
263 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0, (outs DPR:$dst), (ins SPR:$a),
264                   IIC_fpCVTDS, "vcvt", ".f64.f32\t$dst, $a",
265                   [(set DPR:$dst, (fextend SPR:$a))]>;
// Special case encoding: bits 11-8 is 0b1011.
// VCVTSD: narrow f64 -> f32; cannot use the S-register base class because
// of the encoding quirk above, so the bit fields are spelled out.
// NOTE(review): the closing '}' of this def is missing from this excerpt.
268 def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
269                    IIC_fpCVTSD, "vcvt", ".f32.f64\t$dst, $a",
270                    [(set SPR:$dst, (fround DPR:$a))]> {
271   let Inst{27-23} = 0b11101;
272   let Inst{21-16} = 0b110111;
273   let Inst{11-8}  = 0b1011;
274   let Inst{7-6}   = 0b11;
// Between half-precision and single-precision. For disassembly only.
// VCVTB operates on the Bottom half-word, VCVTT on the Top half-word.
280 def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
281                  /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
282                  [/* For disassembly only; pattern left blank */]>;
// f32_to_f16 / f16_to_f32 ISD nodes are mapped to the bottom-half-word
// forms, moving through a GPR for the i16-in-i32 representation.
284 def : ARMPat<(f32_to_f16 SPR:$a),
285              (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
287 def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
288                  /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
289                  [/* For disassembly only; pattern left blank */]>;
291 def : ARMPat<(f16_to_f32 GPR:$a),
292              (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
294 def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
295                  /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
296                  [/* For disassembly only; pattern left blank */]>;
298 def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
299                  /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
300                  [/* For disassembly only; pattern left blank */]>;
// Register-to-register FP moves; no patterns (the copy coalescer/copyPhysReg
// emits them), hence neverHasSideEffects.
302 let neverHasSideEffects = 1 in {
303 def VMOVD: ADuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
304                 IIC_fpUNA64, "vmov", ".f64\t$dst, $a", []>;
306 def VMOVS: ASuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
307                 IIC_fpUNA32, "vmov", ".f32\t$dst, $a", []>;
308 } // neverHasSideEffects
// Negation and square root (f64/f32).
310 def VNEGD  : ADuI<0b11101, 0b11, 0b0001, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
311                   IIC_fpUNA64, "vneg", ".f64\t$dst, $a",
312                   [(set DPR:$dst, (fneg (f64 DPR:$a)))]>;
314 def VNEGS  : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,(outs SPR:$dst), (ins SPR:$a),
315                    IIC_fpUNA32, "vneg", ".f32\t$dst, $a",
316                    [(set SPR:$dst, (fneg SPR:$a))]>;
318 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
319                   IIC_fpSQRT64, "vsqrt", ".f64\t$dst, $a",
320                   [(set DPR:$dst, (fsqrt (f64 DPR:$a)))]>;
322 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
323                   IIC_fpSQRT32, "vsqrt", ".f32\t$dst, $a",
324                   [(set SPR:$dst, (fsqrt SPR:$a))]>;
326 //===----------------------------------------------------------------------===//
327 // FP <-> GPR Copies. Int <-> FP Conversions.
// Bitwise moves between one SPR and one GPR (no value conversion).
330 def VMOVRS : AVConv2I<0b11100001, 0b1010, (outs GPR:$dst), (ins SPR:$src),
331                  IIC_fpMOVSI, "vmov", "\t$dst, $src",
332                  [(set GPR:$dst, (bitconvert SPR:$src))]>;
334 def VMOVSR : AVConv4I<0b11100000, 0b1010, (outs SPR:$dst), (ins GPR:$src),
335                  IIC_fpMOVIS, "vmov", "\t$dst, $src",
336                  [(set SPR:$dst, (bitconvert GPR:$src))]>;
// Double-width moves: one DPR (or an SPR pair) <-> two GPRs. No patterns
// because instructions with multiple results cannot be pattern-matched.
// NOTE(review): per-def closing braces are missing from this excerpt.
338 let neverHasSideEffects = 1 in {
339 def VMOVRRD  : AVConv3I<0b11000101, 0b1011,
340                       (outs GPR:$wb, GPR:$dst2), (ins DPR:$src),
341                  IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src",
342                  [/* FIXME: Can't write pattern for multiple result instr*/]> {
343   let Inst{7-6} = 0b00;
346 def VMOVRRS  : AVConv3I<0b11000101, 0b1010,
347                       (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
348                  IIC_fpMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
349                  [/* For disassembly only; pattern left blank */]> {
350   let Inst{7-6} = 0b00;
352 } // neverHasSideEffects
// VMOVDRR is selectable via the arm_fmdrr (ARMISD::VMOVDRR) node.
357 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
358                       (outs DPR:$dst), (ins GPR:$src1, GPR:$src2),
359                 IIC_fpMOVID, "vmov", "\t$dst, $src1, $src2",
360                 [(set DPR:$dst, (arm_fmdrr GPR:$src1, GPR:$src2))]> {
361   let Inst{7-6} = 0b00;
364 let neverHasSideEffects = 1 in
365 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
366                      (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
367                 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
368                 [/* For disassembly only; pattern left blank */]> {
369   let Inst{7-6} = 0b00;
375 // FMRX : SPR system reg -> GPR
379 // FMXR: GPR -> VFP system reg
// Int -> FP conversions. The source integer is already in an SPR (bitwise
// copy of the i32); Inst{7} selects signed (1) vs. unsigned (0) source.
// NOTE(review): per-def closing braces are missing from this excerpt.
384 def VSITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
385                  (outs DPR:$dst), (ins SPR:$a),
386                  IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a",
387                  [(set DPR:$dst, (f64 (arm_sitof SPR:$a)))]> {
388   let Inst{7} = 1; // s32
391 def VSITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
392                   (outs SPR:$dst),(ins SPR:$a),
393                   IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a",
394                   [(set SPR:$dst, (arm_sitof SPR:$a))]> {
395   let Inst{7} = 1; // s32
398 def VUITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
399                   (outs DPR:$dst), (ins SPR:$a),
400                   IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a",
401                   [(set DPR:$dst, (f64 (arm_uitof SPR:$a)))]> {
402   let Inst{7} = 0; // u32
405 def VUITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
406                   (outs SPR:$dst), (ins SPR:$a),
407                   IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a",
408                   [(set SPR:$dst, (arm_uitof SPR:$a))]> {
409   let Inst{7} = 0; // u32
413 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
// FP -> int, round-towards-zero (Z bit set): these match the ISD semantics
// of fp_to_sint/fp_to_uint, so they carry patterns.
// NOTE(review): per-def closing braces are missing from this excerpt.
415 def VTOSIZD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
416                        (outs SPR:$dst), (ins DPR:$a),
417                  IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a",
418                  [(set SPR:$dst, (arm_ftosi (f64 DPR:$a)))]> {
419   let Inst{7} = 1; // Z bit
422 def VTOSIZS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
423                         (outs SPR:$dst), (ins SPR:$a),
424                   IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a",
425                   [(set SPR:$dst, (arm_ftosi SPR:$a))]> {
426   let Inst{7} = 1; // Z bit
429 def VTOUIZD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
430                        (outs SPR:$dst), (ins DPR:$a),
431                  IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a",
432                  [(set SPR:$dst, (arm_ftoui (f64 DPR:$a)))]> {
433   let Inst{7} = 1; // Z bit
436 def VTOUIZS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
437                         (outs SPR:$dst), (ins SPR:$a),
438                   IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a",
439                   [(set SPR:$dst, (arm_ftoui SPR:$a))]> {
440   let Inst{7} = 1; // Z bit
// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
// For disassembly only.
// (These depend on dynamic rounding state, hence Uses = [FPSCR]; they are
// reachable from code only via the int_arm_vcvtr/vcvtru intrinsics.)
445 let Uses = [FPSCR] in {
446 def VTOSIRD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
447                        (outs SPR:$dst), (ins DPR:$a),
448                        IIC_fpCVTDI, "vcvtr", ".s32.f64\t$dst, $a",
449                        [(set SPR:$dst, (int_arm_vcvtr (f64 DPR:$a)))]> {
450   let Inst{7} = 0; // Z bit
453 def VTOSIRS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
454                         (outs SPR:$dst), (ins SPR:$a),
455                         IIC_fpCVTSI, "vcvtr", ".s32.f32\t$dst, $a",
456                         [(set SPR:$dst, (int_arm_vcvtr SPR:$a))]> {
457   let Inst{7} = 0; // Z bit
460 def VTOUIRD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
461                        (outs SPR:$dst), (ins DPR:$a),
462                        IIC_fpCVTDI, "vcvtr", ".u32.f64\t$dst, $a",
463                        [(set SPR:$dst, (int_arm_vcvtru (f64 DPR:$a)))]> {
464   let Inst{7} = 0; // Z bit
467 def VTOUIRS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
468                         (outs SPR:$dst), (ins SPR:$a),
469                         IIC_fpCVTSI, "vcvtr", ".u32.f32\t$dst, $a",
470                         [(set SPR:$dst, (int_arm_vcvtru SPR:$a))]> {
471   let Inst{7} = 0; // Z bit
475 // Convert between floating-point and fixed-point
476 // Data type for fixed-point naming convention:
477 // S16 (U=0, sx=0) -> SH
478 // U16 (U=1, sx=0) -> UH
479 // S32 (U=0, sx=1) -> SL
480 // U32 (U=1, sx=1) -> UL
// Fixed-point conversions operate in place: the fractional result replaces
// the source register, hence the $a = $dst tie.
482 let Constraints = "$a = $dst" in {
// FP to Fixed-Point:
// isCodeGenOnly: these forms are not exposed to the assembler/disassembler
// tables from here.
486 let isCodeGenOnly = 1 in {
487 def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
488                        (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
489                  IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
490                  [/* For disassembly only; pattern left blank */]>;
492 def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
493                        (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
494                  IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
495                  [/* For disassembly only; pattern left blank */]>;
497 def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
498                        (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
499                  IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
500                  [/* For disassembly only; pattern left blank */]>;
502 def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
503                        (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
504                  IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
505                  [/* For disassembly only; pattern left blank */]>;
507 def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
508                        (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
509                  IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
510                  [/* For disassembly only; pattern left blank */]>;
512 def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
513                        (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
514                  IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
515                  [/* For disassembly only; pattern left blank */]>;
517 def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
518                        (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
519                  IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
520                  [/* For disassembly only; pattern left blank */]>;
522 def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
523                        (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
524                  IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
525                  [/* For disassembly only; pattern left blank */]>;
// Fixed-Point to FP:
530 let isCodeGenOnly = 1 in {
531 def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
532                        (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
533                  IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
534                  [/* For disassembly only; pattern left blank */]>;
536 def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
537                        (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
538                  IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
539                  [/* For disassembly only; pattern left blank */]>;
541 def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
542                        (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
543                  IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
544                  [/* For disassembly only; pattern left blank */]>;
546 def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
547                        (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
548                  IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
549                  [/* For disassembly only; pattern left blank */]>;
551 def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
552                        (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
553                  IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
554                  [/* For disassembly only; pattern left blank */]>;
556 def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
557                        (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
558                  IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
559                  [/* For disassembly only; pattern left blank */]>;
561 def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
562                        (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
563                  IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
564                  [/* For disassembly only; pattern left blank */]>;
566 def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
567                        (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
568                  IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
569                  [/* For disassembly only; pattern left blank */]>;
572 } // End of 'let Constraints = "$a = $dst" in'
574 //===----------------------------------------------------------------------===//
575 // FP FMA Operations.
// Multiply-accumulate family. All read and write the accumulator in place
// ($dstin tied to $dst). Naming:
//   VMLA  = dstin + a*b      VMLS  = dstin - a*b (via fadd of fneg)
//   VNMLS = a*b - dstin      VNMLA = -(a*b) - dstin
578 def VMLAD : ADbI_vmlX<0b11100, 0b00, 0, 0,
579                 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
580                 IIC_fpMAC64, "vmla", ".f64\t$dst, $a, $b",
581                 [(set DPR:$dst, (fadd (fmul DPR:$a, DPR:$b),
582                                       (f64 DPR:$dstin)))]>,
583                 RegConstraint<"$dstin = $dst">;
585 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
586                  (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
587                  IIC_fpMAC32, "vmla", ".f32\t$dst, $a, $b",
588                  [(set SPR:$dst, (fadd (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
589                  RegConstraint<"$dstin = $dst">;
591 def VNMLSD : ADbI_vmlX<0b11100, 0b01, 0, 0,
592                 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
593                 IIC_fpMAC64, "vnmls", ".f64\t$dst, $a, $b",
594                 [(set DPR:$dst, (fsub (fmul DPR:$a, DPR:$b),
595                                       (f64 DPR:$dstin)))]>,
596                 RegConstraint<"$dstin = $dst">;
598 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
599                  (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
600                  IIC_fpMAC32, "vnmls", ".f32\t$dst, $a, $b",
601                  [(set SPR:$dst, (fsub (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
602                  RegConstraint<"$dstin = $dst">;
604 def VMLSD : ADbI_vmlX<0b11100, 0b00, 1, 0,
605                 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
606                 IIC_fpMAC64, "vmls", ".f64\t$dst, $a, $b",
607                 [(set DPR:$dst, (fadd (fneg (fmul DPR:$a, DPR:$b)),
608                                       (f64 DPR:$dstin)))]>,
609                 RegConstraint<"$dstin = $dst">;
611 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
612                  (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
613                  IIC_fpMAC32, "vmls", ".f32\t$dst, $a, $b",
614                  [(set SPR:$dst, (fadd (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
615                  RegConstraint<"$dstin = $dst">;
// Also select VMLS for the (dstin - a*b) form of fsub, when NEON is not
// preferred for scalar FP.
617 def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, (f64 DPR:$b))),
618           (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
619 def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
620           (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
622 def VNMLAD : ADbI_vmlX<0b11100, 0b01, 1, 0,
623                 (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
624                 IIC_fpMAC64, "vnmla", ".f64\t$dst, $a, $b",
625                 [(set DPR:$dst, (fsub (fneg (fmul DPR:$a, DPR:$b)),
626                                       (f64 DPR:$dstin)))]>,
627                 RegConstraint<"$dstin = $dst">;
629 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
630                 (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
631                 IIC_fpMAC32, "vnmla", ".f32\t$dst, $a, $b",
632                 [(set SPR:$dst, (fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
633                 RegConstraint<"$dstin = $dst">;
635 //===----------------------------------------------------------------------===//
636 // FP Conditional moves.
// Predicated moves/negates used for FP select; selected manually (the
// ARMcmov/ARMcneg patterns are commented out because the condition-code
// operand cannot be expressed here). $false is tied to $dst so the false
// value is already in the destination and the move executes only when the
// predicate holds.
639 let neverHasSideEffects = 1 in {
640 def VMOVDcc  : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
641                     (outs DPR:$dst), (ins DPR:$false, DPR:$true),
642                     IIC_fpUNA64, "vmov", ".f64\t$dst, $true",
643                 [/*(set DPR:$dst, (ARMcmov DPR:$false, DPR:$true, imm:$cc))*/]>,
644                 RegConstraint<"$false = $dst">;
646 def VMOVScc  : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
647                     (outs SPR:$dst), (ins SPR:$false, SPR:$true),
648                     IIC_fpUNA32, "vmov", ".f32\t$dst, $true",
649                 [/*(set SPR:$dst, (ARMcmov SPR:$false, SPR:$true, imm:$cc))*/]>,
650                 RegConstraint<"$false = $dst">;
652 def VNEGDcc  : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
653                     (outs DPR:$dst), (ins DPR:$false, DPR:$true),
654                     IIC_fpUNA64, "vneg", ".f64\t$dst, $true",
655                 [/*(set DPR:$dst, (ARMcneg DPR:$false, DPR:$true, imm:$cc))*/]>,
656                 RegConstraint<"$false = $dst">;
658 def VNEGScc  : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
659                     (outs SPR:$dst), (ins SPR:$false, SPR:$true),
660                     IIC_fpUNA32, "vneg", ".f32\t$dst, $true",
661                 [/*(set SPR:$dst, (ARMcneg SPR:$false, SPR:$true, imm:$cc))*/]>,
662                 RegConstraint<"$false = $dst">;
663 } // neverHasSideEffects
665 //===----------------------------------------------------------------------===//
669 // APSR is the application level alias of CPSR. FMSTAT copies the FPSCR
// N, Z, C, V condition flags into the corresponding APSR (CPSR) flag bits.
// FMSTAT: vmrs apsr_nzcv, fpscr — copies FPSCR condition flags into CPSR.
// NOTE(review): the pattern/operand tail of this def (between the asm
// string and the 'let Inst' lines) is missing from this excerpt.
671 let Defs = [CPSR], Uses = [FPSCR] in
672 def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
673                    "\tapsr_nzcv, fpscr",
675   let Inst{27-20} = 0b11101111;
676   let Inst{19-16} = 0b0001;
677   let Inst{15-12} = 0b1111;
678   let Inst{11-8}  = 0b1010;
// FPSCR <-> GPR (for disassembly only)
// VMRS reads the whole FPSCR into a GPR (int_arm_get_fpscr intrinsic).
// NOTE(review): closing braces of these defs are missing from this excerpt.
684 let hasSideEffects = 1, Uses = [FPSCR] in
685 def VMRS : VFPAI<(outs GPR:$dst), (ins), VFPMiscFrm, IIC_fpSTAT,
686              "vmrs", "\t$dst, fpscr",
687              [(set GPR:$dst, (int_arm_get_fpscr))]> {
688   let Inst{27-20} = 0b11101111;
689   let Inst{19-16} = 0b0001;
690   let Inst{11-8}  = 0b1010;
// VMSR writes a GPR into the FPSCR (int_arm_set_fpscr intrinsic).
695 let Defs = [FPSCR] in
696 def VMSR : VFPAI<(outs), (ins GPR:$src), VFPMiscFrm, IIC_fpSTAT,
697              "vmsr", "\tfpscr, $src",
698              [(int_arm_set_fpscr GPR:$src)]> {
699   let Inst{27-20} = 0b11101110;
700   let Inst{19-16} = 0b0001;
701   let Inst{11-8}  = 0b1010;
// Materialize FP immediates. VFP3 only.
// The vfp_f*imm operand predicates (above) guarantee the value fits the
// VFP3 8-bit immediate encoding. Rematerializable so the allocator can
// recreate the constant instead of spilling.
// NOTE(review): the immediate-field 'let Inst' lines and closing braces of
// these defs are missing from this excerpt.
707 let isReMaterializable = 1 in {
708 def FCONSTD : VFPAI<(outs DPR:$dst), (ins vfp_f64imm:$imm),
709                     VFPMiscFrm, IIC_fpUNA64,
710                     "vmov", ".f64\t$dst, $imm",
711                     [(set DPR:$dst, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
712   let Inst{27-23} = 0b11101;
713   let Inst{21-20} = 0b11;
714   let Inst{11-9}  = 0b101;
716   let Inst{7-4}   = 0b0000;
719 def FCONSTS : VFPAI<(outs SPR:$dst), (ins vfp_f32imm:$imm),
720                     VFPMiscFrm, IIC_fpUNA32,
721                     "vmov", ".f32\t$dst, $imm",
722                     [(set SPR:$dst, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
723   let Inst{27-23} = 0b11101;
724   let Inst{21-20} = 0b11;
725   let Inst{11-9}  = 0b101;
727   let Inst{7-4}   = 0b0000;