1 //===-- ARM64ISelDAGToDAG.cpp - A dag to dag inst selector for ARM64 ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the ARM64 target.
12 //===----------------------------------------------------------------------===//
14 #include "ARM64TargetMachine.h"
15 #include "MCTargetDesc/ARM64AddressingModes.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/CodeGen/SelectionDAGISel.h"
18 #include "llvm/IR/Function.h" // To access function attributes.
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
26 using namespace llvm;
28 #define DEBUG_TYPE "arm64-isel"
30 //===--------------------------------------------------------------------===//
31 /// ARM64DAGToDAGISel - ARM64 specific code to select ARM64 machine
32 /// instructions for SelectionDAG operations.
34 namespace {
36 class ARM64DAGToDAGISel : public SelectionDAGISel {
37 ARM64TargetMachine &TM;
39 /// Subtarget - Keep a pointer to the ARM64Subtarget around so that we can
40 /// make the right decision when generating code for different targets.
41 const ARM64Subtarget *Subtarget;
43 bool ForCodeSize;
45 public:
46 explicit ARM64DAGToDAGISel(ARM64TargetMachine &tm, CodeGenOpt::Level OptLevel)
47 : SelectionDAGISel(tm, OptLevel), TM(tm),
48 Subtarget(&TM.getSubtarget<ARM64Subtarget>()), ForCodeSize(false) {}
50 virtual const char *getPassName() const {
51 return "ARM64 Instruction Selection";
54 virtual bool runOnMachineFunction(MachineFunction &MF) {
55 AttributeSet FnAttrs = MF.getFunction()->getAttributes();
56 ForCodeSize =
57     FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
58                          Attribute::OptimizeForSize) ||
59     FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
60 return SelectionDAGISel::runOnMachineFunction(MF);
63 SDNode *Select(SDNode *Node);
65 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
66 /// inline asm expressions.
67 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
69 std::vector<SDValue> &OutOps);
71 SDNode *SelectMLAV64LaneV128(SDNode *N);
72 SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
73 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
74 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
75 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
76 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
77 return SelectShiftedRegister(N, false, Reg, Shift);
79 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
80 return SelectShiftedRegister(N, true, Reg, Shift);
82 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
83 return SelectAddrModeIndexed(N, 1, Base, OffImm);
85 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
86 return SelectAddrModeIndexed(N, 2, Base, OffImm);
88 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
89 return SelectAddrModeIndexed(N, 4, Base, OffImm);
91 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
92 return SelectAddrModeIndexed(N, 8, Base, OffImm);
94 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
95 return SelectAddrModeIndexed(N, 16, Base, OffImm);
97 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
98 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
100 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
101 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
103 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
104 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
106 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
107 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
109 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
110 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
113 bool SelectAddrModeRO8(SDValue N, SDValue &Base, SDValue &Offset,
115 return SelectAddrModeRO(N, 1, Base, Offset, Imm);
117 bool SelectAddrModeRO16(SDValue N, SDValue &Base, SDValue &Offset,
119 return SelectAddrModeRO(N, 2, Base, Offset, Imm);
121 bool SelectAddrModeRO32(SDValue N, SDValue &Base, SDValue &Offset,
123 return SelectAddrModeRO(N, 4, Base, Offset, Imm);
125 bool SelectAddrModeRO64(SDValue N, SDValue &Base, SDValue &Offset,
127 return SelectAddrModeRO(N, 8, Base, Offset, Imm);
129 bool SelectAddrModeRO128(SDValue N, SDValue &Base, SDValue &Offset,
131 return SelectAddrModeRO(N, 16, Base, Offset, Imm);
133 bool SelectAddrModeNoIndex(SDValue N, SDValue &Val);
135 /// Form sequences of consecutive 64/128-bit registers for use in NEON
136 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
137 /// between 1 and 4 elements. If it contains a single element, that element
138 /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
139 SDValue createDTuple(ArrayRef<SDValue> Vecs);
140 SDValue createQTuple(ArrayRef<SDValue> Vecs);
142 /// Generic helper for the createDTuple/createQTuple
143 /// functions. Those should almost always be called instead.
144 SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
147 SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
149 SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
151 SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
153 SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
155 SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
156 SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
158 SDNode *SelectSIMDAddSubNarrowing(unsigned IntNo, SDNode *Node);
159 SDNode *SelectSIMDXtnNarrowing(unsigned IntNo, SDNode *Node);
161 SDNode *SelectBitfieldExtractOp(SDNode *N);
162 SDNode *SelectBitfieldInsertOp(SDNode *N);
164 SDNode *SelectLIBM(SDNode *N);
166 // Include the pieces autogenerated from the target description.
167 #include "ARM64GenDAGISel.inc"
170 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
172 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
174 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
176 bool SelectAddrModeRO(SDValue N, unsigned Size, SDValue &Base,
177 SDValue &Offset, SDValue &Imm);
178 bool isWorthFolding(SDValue V) const;
179 bool SelectExtendedSHL(SDValue N, unsigned Size, SDValue &Offset,
182 template<unsigned RegWidth>
183 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
184 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
187 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
189 } // end anonymous namespace
191 /// isIntImmediate - This method tests to see if the node is a constant
192 /// operand. If so Imm will receive the value.
193 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
194 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
195 Imm = C->getZExtValue();
196 return true;
197 }
199 return false;
200 }
201 // isIntImmediate - This method tests to see if the value is a constant
202 // operand. If so Imm will receive the value.
203 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
204 return isIntImmediate(N.getNode(), Imm);
207 // isOpcWithIntImmediate - This method tests to see if the node is a specific
208 // opcode and that it has an immediate integer right operand.
209 // If so Imm will receive the immediate value.
210 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
212 return N->getOpcode() == Opc &&
213 isIntImmediate(N->getOperand(1).getNode(), Imm);
216 bool ARM64DAGToDAGISel::SelectAddrModeNoIndex(SDValue N, SDValue &Val) {
217 EVT ValTy = N.getValueType();
218 if (ValTy != MVT::i64)
219 return false;
220 Val = N;
221 return true;
222 }
224 bool ARM64DAGToDAGISel::SelectInlineAsmMemoryOperand(
225 const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
226 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
227 // Require the address to be in a register. That is safe for all ARM64
228 // variants and it is hard to do anything much smarter without knowing
229 // how the operand is used.
230 OutOps.push_back(Op);
231 return false;
232 }
234 /// SelectArithImmed - Select an immediate value that can be represented as
235 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
236 /// Val set to the 12-bit value and Shift set to the shifter operand.
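/// For example (illustrative): #0xfff is encodable with LSL #0 and #0xfff000
/// with LSL #12, while #0x1001 has set bits in both halves and is rejected
/// here.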
237 bool ARM64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
239 // This function is called from the addsub_shifted_imm ComplexPattern,
240 // which lists [imm] as the list of opcodes it's interested in; however,
241 // we still need to check whether the operand is actually an immediate
242 // here because the ComplexPattern opcode list is only used in
243 // root-level opcode matching.
244 if (!isa<ConstantSDNode>(N.getNode()))
247 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
248 unsigned ShiftAmt;
250 if (Immed >> 12 == 0) {
251 ShiftAmt = 0;
252 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
253 ShiftAmt = 12;
254 Immed = Immed >> 12;
255 } else
256 return false;
258 unsigned ShVal = ARM64_AM::getShifterImm(ARM64_AM::LSL, ShiftAmt);
259 Val = CurDAG->getTargetConstant(Immed, MVT::i32);
260 Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
264 /// SelectNegArithImmed - As above, but negates the value before trying to
265 /// select it.
266 bool ARM64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
268 // This function is called from the addsub_shifted_imm ComplexPattern,
269 // which lists [imm] as the list of opcodes it's interested in; however,
270 // we still need to check whether the operand is actually an immediate
271 // here because the ComplexPattern opcode list is only used in
272 // root-level opcode matching.
273 if (!isa<ConstantSDNode>(N.getNode()))
276 // The immediate operand must be a 24-bit zero-extended immediate.
277 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
279 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
280 // have the opposite effect on the C flag, so this pattern mustn't match under
281 // those circumstances.
282 if (Immed == 0)
283 return false;
285 if (N.getValueType() == MVT::i32)
286 Immed = ~((uint32_t)Immed) + 1;
287 else
288 Immed = ~Immed + 1ULL;
289 if (Immed & 0xFFFFFFFFFF000000ULL)
290 return false;
292 Immed &= 0xFFFFFFULL;
293 return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
296 /// getShiftTypeForNode - Translate a shift node to the corresponding
297 /// ShiftType value.
298 static ARM64_AM::ShiftType getShiftTypeForNode(SDValue N) {
299 switch (N.getOpcode()) {
300 default:
301 return ARM64_AM::InvalidShift;
302 case ISD::SHL:
303 return ARM64_AM::LSL;
304 case ISD::SRL:
305 return ARM64_AM::LSR;
306 case ISD::SRA:
307 return ARM64_AM::ASR;
308 case ISD::ROTR:
309 return ARM64_AM::ROR;
310 }
311 }
313 /// \brief Determine whether it is worth folding V into an extended register.
314 bool ARM64DAGToDAGISel::isWorthFolding(SDValue V) const {
315 // It hurts if a value is used at least twice, unless we are optimizing
316 // for code size.
317 if (ForCodeSize || V.hasOneUse())
318 return true;
319 return false;
320 }
322 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
323 /// is not shifted, set the Shift operand to default of "LSL 0". The logical
324 /// instructions allow the shifted register to be rotated, but the arithmetic
325 /// instructions do not. The AllowROR parameter specifies whether ROR is
326 /// allowed.
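/// For example (illustrative): (add x0, (shl x1, 4)) can fold to
/// "add x0, x0, x1, lsl #4", and with AllowROR a logical op such as
/// (or x0, (rotr x1, 8)) can fold to "orr x0, x0, x1, ror #8".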
327 bool ARM64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
328 SDValue &Reg, SDValue &Shift) {
329 ARM64_AM::ShiftType ShType = getShiftTypeForNode(N);
330 if (ShType == ARM64_AM::InvalidShift)
332 if (!AllowROR && ShType == ARM64_AM::ROR)
335 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
336 unsigned BitSize = N.getValueType().getSizeInBits();
337 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
338 unsigned ShVal = ARM64_AM::getShifterImm(ShType, Val);
340 Reg = N.getOperand(0);
341 Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
342 return isWorthFolding(N);
348 /// getExtendTypeForNode - Translate an extend node to the corresponding
349 /// ExtendType value.
350 static ARM64_AM::ExtendType getExtendTypeForNode(SDValue N,
351 bool IsLoadStore = false) {
352 if (N.getOpcode() == ISD::SIGN_EXTEND ||
353 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
355 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
356 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
358 SrcVT = N.getOperand(0).getValueType();
360 if (!IsLoadStore && SrcVT == MVT::i8)
361 return ARM64_AM::SXTB;
362 else if (!IsLoadStore && SrcVT == MVT::i16)
363 return ARM64_AM::SXTH;
364 else if (SrcVT == MVT::i32)
365 return ARM64_AM::SXTW;
366 else if (SrcVT == MVT::i64)
367 return ARM64_AM::SXTX;
369 return ARM64_AM::InvalidExtend;
370 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
371 N.getOpcode() == ISD::ANY_EXTEND) {
372 EVT SrcVT = N.getOperand(0).getValueType();
373 if (!IsLoadStore && SrcVT == MVT::i8)
374 return ARM64_AM::UXTB;
375 else if (!IsLoadStore && SrcVT == MVT::i16)
376 return ARM64_AM::UXTH;
377 else if (SrcVT == MVT::i32)
378 return ARM64_AM::UXTW;
379 else if (SrcVT == MVT::i64)
380 return ARM64_AM::UXTX;
382 return ARM64_AM::InvalidExtend;
383 } else if (N.getOpcode() == ISD::AND) {
384 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
385 if (!CSD)
386 return ARM64_AM::InvalidExtend;
387 uint64_t AndMask = CSD->getZExtValue();
389 switch (AndMask) {
390 default:
391 return ARM64_AM::InvalidExtend;
392 case 0xFF:
393 return !IsLoadStore ? ARM64_AM::UXTB : ARM64_AM::InvalidExtend;
394 case 0xFFFF:
395 return !IsLoadStore ? ARM64_AM::UXTH : ARM64_AM::InvalidExtend;
396 case 0xFFFFFFFF:
397 return ARM64_AM::UXTW;
398 }
399 }
401 return ARM64_AM::InvalidExtend;
402 }
404 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
405 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
406 if (DL->getOpcode() != ARM64ISD::DUPLANE16 &&
407 DL->getOpcode() != ARM64ISD::DUPLANE32)
410 SDValue SV = DL->getOperand(0);
411 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
414 SDValue EV = SV.getOperand(1);
415 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
418 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
419 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
420 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
421 LaneOp = EV.getOperand(0);
426 // Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
427 // high lane extract.
428 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
429 SDValue &LaneOp, int &LaneIdx) {
430 StdOp = Op1;
431 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
432 StdOp = Op0;
433 if (!checkHighLaneIndex(Op1.getNode(), LaneOp, LaneIdx))
434 return false;
435 }
436 return true;
437 }
440 /// SelectMLAV64LaneV128 - ARM64 supports vector MLAs where one multiplicand is
441 /// a lane in the upper half of a 128-bit vector. Recognize and select this so
442 /// that we don't emit unnecessary lane extracts.
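/// For example (illustrative): "mla v0.2s, v1.2s, v2.s[3]" reads its lane
/// operand from the high half of the 128-bit v2 register, so no separate
/// lane extract is needed.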
443 SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
444 SDValue Op0 = N->getOperand(0);
445 SDValue Op1 = N->getOperand(1);
446 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
447 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
448 int LaneIdx = -1; // Will hold the lane index.
450 if (Op1.getOpcode() != ISD::MUL ||
451 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
452 LaneIdx)) {
453 std::swap(Op0, Op1);
454 if (Op1.getOpcode() != ISD::MUL ||
455 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
456 LaneIdx))
457 return nullptr;
458 }
460 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
462 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
464 unsigned MLAOpc = ~0U;
466 switch (N->getSimpleValueType(0).SimpleTy) {
467 default:
468 llvm_unreachable("Unrecognized MLA.");
469 case MVT::v4i16:
470 MLAOpc = ARM64::MLAv4i16_indexed;
471 break;
472 case MVT::v8i16:
473 MLAOpc = ARM64::MLAv8i16_indexed;
474 break;
475 case MVT::v2i32:
476 MLAOpc = ARM64::MLAv2i32_indexed;
477 break;
478 case MVT::v4i32:
479 MLAOpc = ARM64::MLAv4i32_indexed;
480 break;
481 }
483 return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
486 SDNode *ARM64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
491 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
495 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
497 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
499 unsigned SMULLOpc = ~0U;
501 if (IntNo == Intrinsic::arm64_neon_smull) {
502 switch (N->getSimpleValueType(0).SimpleTy) {
503 default:
504 llvm_unreachable("Unrecognized SMULL.");
505 case MVT::v4i32:
506 SMULLOpc = ARM64::SMULLv4i16_indexed;
507 break;
508 case MVT::v2i64:
509 SMULLOpc = ARM64::SMULLv2i32_indexed;
510 break;
511 }
512 } else if (IntNo == Intrinsic::arm64_neon_umull) {
513 switch (N->getSimpleValueType(0).SimpleTy) {
514 default:
515 llvm_unreachable("Unrecognized UMULL.");
516 case MVT::v4i32:
517 SMULLOpc = ARM64::UMULLv4i16_indexed;
518 break;
519 case MVT::v2i64:
520 SMULLOpc = ARM64::UMULLv2i32_indexed;
521 break;
522 }
523 } else
524 llvm_unreachable("Unrecognized intrinsic.");
526 return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
529 /// SelectArithExtendedRegister - Select an "extended register" operand. This
530 /// operand folds in an extend followed by an optional left shift.
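/// For example (illustrative): (add x0, (shl (sext_inreg w1, i8), 2)) can be
/// selected as "add x0, x0, w1, sxtb #2", folding both the extend and the
/// left shift into the operand.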
531 bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
533 unsigned ShiftVal = 0;
534 ARM64_AM::ExtendType Ext;
536 if (N.getOpcode() == ISD::SHL) {
537 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
540 ShiftVal = CSD->getZExtValue();
544 Ext = getExtendTypeForNode(N.getOperand(0));
545 if (Ext == ARM64_AM::InvalidExtend)
548 Reg = N.getOperand(0).getOperand(0);
550 Ext = getExtendTypeForNode(N);
551 if (Ext == ARM64_AM::InvalidExtend)
554 Reg = N.getOperand(0);
557 // ARM64 mandates that the RHS of the operation must use the smallest
558 // register class that could contain the size being extended from. Thus,
559 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
560 // there might not be an actual 32-bit value in the program. We can
561 // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
562 if (Reg.getValueType() == MVT::i64 && Ext != ARM64_AM::UXTX &&
563 Ext != ARM64_AM::SXTX) {
564 SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
565 MachineSDNode *Node = CurDAG->getMachineNode(
566 TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32, Reg, SubReg);
567 Reg = SDValue(Node, 0);
570 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
571 return isWorthFolding(N);
574 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
575 /// immediate" address. The "Size" argument is the size in bytes of the memory
576 /// reference, which determines the scale.
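/// For example (illustrative): with Size == 8, "ldr x0, [x1, #32]" encodes
/// the byte offset 32 as the scaled immediate 4; valid offsets are
/// non-negative multiples of Size up to 4095 * Size.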
577 bool ARM64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
578 SDValue &Base, SDValue &OffImm) {
579 const TargetLowering *TLI = getTargetLowering();
580 if (N.getOpcode() == ISD::FrameIndex) {
581 int FI = cast<FrameIndexSDNode>(N)->getIndex();
582 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
583 OffImm = CurDAG->getTargetConstant(0, MVT::i64);
587 if (N.getOpcode() == ARM64ISD::ADDlow) {
588 GlobalAddressSDNode *GAN =
589 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
590 Base = N.getOperand(0);
591 OffImm = N.getOperand(1);
595 const GlobalValue *GV = GAN->getGlobal();
596 unsigned Alignment = GV->getAlignment();
597 const DataLayout *DL = TLI->getDataLayout();
598 if (Alignment == 0 && !Subtarget->isTargetDarwin())
599 Alignment = DL->getABITypeAlignment(GV->getType()->getElementType());
601 if (Alignment >= Size)
605 if (CurDAG->isBaseWithConstantOffset(N)) {
606 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
607 int64_t RHSC = (int64_t)RHS->getZExtValue();
608 unsigned Scale = Log2_32(Size);
609 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
610 Base = N.getOperand(0);
611 if (Base.getOpcode() == ISD::FrameIndex) {
612 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
613 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
615 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
621 // Before falling back to our general case, check if the unscaled
622 // instructions can handle this. If so, that's preferable.
623 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
626 // Base only. The address will be materialized into a register before
627 // the memory is accessed.
628 // add x0, Xbase, #offset
631 OffImm = CurDAG->getTargetConstant(0, MVT::i64);
635 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
636 /// immediate" address. This should only match when there is an offset that
637 /// is not valid for a scaled immediate addressing mode. The "Size" argument
638 /// is the size in bytes of the memory reference, which is needed here to know
639 /// what is valid for a scaled immediate.
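/// For example (illustrative): "ldur x0, [x1, #-8]" uses this form, since a
/// negative or misaligned offset in [-256, 255] has no scaled encoding.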
640 bool ARM64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
641 SDValue &Base, SDValue &OffImm) {
642 if (!CurDAG->isBaseWithConstantOffset(N))
644 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
645 int64_t RHSC = RHS->getSExtValue();
646 // If the offset is valid as a scaled immediate, don't match here.
647 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
648 RHSC < (0x1000 << Log2_32(Size)))
650 if (RHSC >= -256 && RHSC < 256) {
651 Base = N.getOperand(0);
652 if (Base.getOpcode() == ISD::FrameIndex) {
653 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
654 const TargetLowering *TLI = getTargetLowering();
655 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
657 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
664 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
665 SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
666 SDValue ImpDef = SDValue(
667 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
669 MachineSDNode *Node = CurDAG->getMachineNode(
670 TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
671 return SDValue(Node, 0);
674 static SDValue WidenIfNeeded(SelectionDAG *CurDAG, SDValue N) {
675 if (N.getValueType() == MVT::i32) {
676 return Widen(CurDAG, N);
682 /// \brief Check if the given SHL node (\p N) can be used to form an
683 /// extended register for an addressing mode.
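/// For example (illustrative): (shl (zext w1 to i64), 3) feeding an 8-byte
/// access can become the "[x0, w1, uxtw #3]" operand form; the shift amount
/// must be 0 or log2 of the access size.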
684 bool ARM64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
685 SDValue &Offset, SDValue &Imm) {
686 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
687 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
688 if (CSD && (CSD->getZExtValue() & 0x7) == CSD->getZExtValue()) {
690 ARM64_AM::ExtendType Ext = getExtendTypeForNode(N.getOperand(0), true);
691 if (Ext == ARM64_AM::InvalidExtend) {
692 Ext = ARM64_AM::UXTX;
693 Offset = WidenIfNeeded(CurDAG, N.getOperand(0));
695 Offset = WidenIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
698 unsigned LegalShiftVal = Log2_32(Size);
699 unsigned ShiftVal = CSD->getZExtValue();
701 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
704 Imm = CurDAG->getTargetConstant(
705 ARM64_AM::getMemExtendImm(Ext, ShiftVal != 0), MVT::i32);
706 if (isWorthFolding(N))
712 bool ARM64DAGToDAGISel::SelectAddrModeRO(SDValue N, unsigned Size,
713 SDValue &Base, SDValue &Offset,
715 if (N.getOpcode() != ISD::ADD)
717 SDValue LHS = N.getOperand(0);
718 SDValue RHS = N.getOperand(1);
720 // We don't want to match immediate adds here, because they are better lowered
721 // to the register-immediate addressing modes.
722 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
725 // Check if this particular node is reused in any non-memory related
726 // operation. If yes, do not try to fold this node into the address
727 // computation, since the computation will be kept.
728 const SDNode *Node = N.getNode();
729 for (SDNode *UI : Node->uses()) {
730 if (!isa<MemSDNode>(*UI))
734 // Remember if it is worth folding N when it produces an extended register.
735 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
737 // Try to match a shifted extend on the RHS.
738 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
739 SelectExtendedSHL(RHS, Size, Offset, Imm)) {
744 // Try to match a shifted extend on the LHS.
745 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
746 SelectExtendedSHL(LHS, Size, Offset, Imm)) {
751 ARM64_AM::ExtendType Ext = ARM64_AM::UXTX;
752 // Try to match an unshifted extend on the LHS.
753 if (IsExtendedRegisterWorthFolding &&
754 (Ext = getExtendTypeForNode(LHS, true)) != ARM64_AM::InvalidExtend) {
756 Offset = WidenIfNeeded(CurDAG, LHS.getOperand(0));
757 Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
759 if (isWorthFolding(LHS))
763 // Try to match an unshifted extend on the RHS.
764 if (IsExtendedRegisterWorthFolding &&
765 (Ext = getExtendTypeForNode(RHS, true)) != ARM64_AM::InvalidExtend) {
767 Offset = WidenIfNeeded(CurDAG, RHS.getOperand(0));
768 Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
770 if (isWorthFolding(RHS))
774 // Match any non-shifted, non-extend, non-immediate add expression.
776 Offset = WidenIfNeeded(CurDAG, RHS);
777 Ext = ARM64_AM::UXTX;
778 Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
780 // Reg1 + Reg2 is free: no check needed.
784 SDValue ARM64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
785 static unsigned RegClassIDs[] = { ARM64::DDRegClassID, ARM64::DDDRegClassID,
786 ARM64::DDDDRegClassID };
787 static unsigned SubRegs[] = { ARM64::dsub0, ARM64::dsub1,
788 ARM64::dsub2, ARM64::dsub3 };
790 return createTuple(Regs, RegClassIDs, SubRegs);
793 SDValue ARM64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
794 static unsigned RegClassIDs[] = { ARM64::QQRegClassID, ARM64::QQQRegClassID,
795 ARM64::QQQQRegClassID };
796 static unsigned SubRegs[] = { ARM64::qsub0, ARM64::qsub1,
797 ARM64::qsub2, ARM64::qsub3 };
799 return createTuple(Regs, RegClassIDs, SubRegs);
802 SDValue ARM64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
803 unsigned RegClassIDs[],
804 unsigned SubRegs[]) {
805 // There's no special register-class for a vector-list of 1 element: it's just
807 if (Regs.size() == 1)
810 assert(Regs.size() >= 2 && Regs.size() <= 4);
812 SDLoc DL(Regs[0].getNode());
814 SmallVector<SDValue, 4> Ops;
816 // First operand of REG_SEQUENCE is the desired RegClass.
818 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));
820 // Then we get pairs of source & subregister-position for the components.
821 for (unsigned i = 0; i < Regs.size(); ++i) {
822 Ops.push_back(Regs[i]);
823 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
827 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
828 return SDValue(N, 0);
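// For example (illustrative): createDTuple({v0, v1}) produces a REG_SEQUENCE
// in the DD register class with v0 in dsub0 and v1 in dsub1, the layout the
// ld2/st2 family of instructions expects.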
831 SDNode *ARM64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
832 unsigned Opc, bool isExt) {
833 SDLoc dl(N);
834 EVT VT = N->getValueType(0);
836 unsigned ExtOff = isExt;
838 // Form a REG_SEQUENCE to force register allocation.
839 unsigned Vec0Off = ExtOff + 1;
840 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
841 N->op_begin() + Vec0Off + NumVecs);
842 SDValue RegSeq = createQTuple(Regs);
844 SmallVector<SDValue, 6> Ops;
846 Ops.push_back(N->getOperand(1));
847 Ops.push_back(RegSeq);
848 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
849 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
852 SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
853 LoadSDNode *LD = cast<LoadSDNode>(N);
854 if (LD->isUnindexed())
855 return nullptr;
856 EVT VT = LD->getMemoryVT();
857 EVT DstVT = N->getValueType(0);
858 ISD::MemIndexedMode AM = LD->getAddressingMode();
859 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
861 // We're not doing validity checking here. That was done when checking
862 // if we should mark the load as indexed or not. We're just selecting
863 // the right instruction.
865 unsigned Opcode;
866 ISD::LoadExtType ExtType = LD->getExtensionType();
867 bool InsertTo64 = false;
868 if (VT == MVT::i64)
869 Opcode = IsPre ? ARM64::LDRXpre_isel : ARM64::LDRXpost_isel;
870 else if (VT == MVT::i32) {
871 if (ExtType == ISD::NON_EXTLOAD)
872 Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
873 else if (ExtType == ISD::SEXTLOAD)
874 Opcode = IsPre ? ARM64::LDRSWpre_isel : ARM64::LDRSWpost_isel;
876 Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
878 // The result of the load is only i32. It's the subreg_to_reg that makes
882 } else if (VT == MVT::i16) {
883 if (ExtType == ISD::SEXTLOAD) {
884 if (DstVT == MVT::i64)
885 Opcode = IsPre ? ARM64::LDRSHXpre_isel : ARM64::LDRSHXpost_isel;
887 Opcode = IsPre ? ARM64::LDRSHWpre_isel : ARM64::LDRSHWpost_isel;
889 Opcode = IsPre ? ARM64::LDRHHpre_isel : ARM64::LDRHHpost_isel;
890 InsertTo64 = DstVT == MVT::i64;
891 // The result of the load is only i32. It's the subreg_to_reg that makes
895 } else if (VT == MVT::i8) {
896 if (ExtType == ISD::SEXTLOAD) {
897 if (DstVT == MVT::i64)
898 Opcode = IsPre ? ARM64::LDRSBXpre_isel : ARM64::LDRSBXpost_isel;
900 Opcode = IsPre ? ARM64::LDRSBWpre_isel : ARM64::LDRSBWpost_isel;
902 Opcode = IsPre ? ARM64::LDRBBpre_isel : ARM64::LDRBBpost_isel;
903 InsertTo64 = DstVT == MVT::i64;
904 // The result of the load is only i32. It's the subreg_to_reg that makes
908 } else if (VT == MVT::f32) {
909 Opcode = IsPre ? ARM64::LDRSpre_isel : ARM64::LDRSpost_isel;
910 } else if (VT == MVT::f64) {
911 Opcode = IsPre ? ARM64::LDRDpre_isel : ARM64::LDRDpost_isel;
912 } else
913 return nullptr;
914 SDValue Chain = LD->getChain();
915 SDValue Base = LD->getBasePtr();
916 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
917 int OffsetVal = (int)OffsetOp->getZExtValue();
918 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
919 SDValue Ops[] = { Base, Offset, Chain };
920 SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), DstVT, MVT::i64,
921                                      MVT::Other, Ops);
922 // Either way, we're replacing the node, so tell the caller that.
923 Done = true;
924 if (InsertTo64) {
925 SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
926 SDNode *Sub = CurDAG->getMachineNode(
927 ARM64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
928 CurDAG->getTargetConstant(0, MVT::i64), SDValue(Res, 0), SubReg);
929 ReplaceUses(SDValue(N, 0), SDValue(Sub, 0));
930 ReplaceUses(SDValue(N, 1), SDValue(Res, 1));
931 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
937 SDNode *ARM64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
938 unsigned SubRegIdx) {
939 SDLoc dl(N);
940 EVT VT = N->getValueType(0);
941 SDValue Chain = N->getOperand(0);
943 SmallVector<SDValue, 6> Ops;
944 Ops.push_back(N->getOperand(2)); // Mem operand;
945 Ops.push_back(Chain);
947 std::vector<EVT> ResTys;
948 ResTys.push_back(MVT::Untyped);
949 ResTys.push_back(MVT::Other);
951 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
952 SDValue SuperReg = SDValue(Ld, 0);
954 // MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
955 // MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
956 // cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
960 ReplaceUses(SDValue(N, 3), CurDAG->getTargetExtractSubreg(SubRegIdx + 3, dl,
964 ReplaceUses(SDValue(N, 2), CurDAG->getTargetExtractSubreg(SubRegIdx + 2, dl,
968 ReplaceUses(SDValue(N, 1), CurDAG->getTargetExtractSubreg(SubRegIdx + 1, dl,
970 ReplaceUses(SDValue(N, 0),
971 CurDAG->getTargetExtractSubreg(SubRegIdx, dl, VT, SuperReg));
974 ReplaceUses(SDValue(N, 0), SuperReg);
978 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
983 SDNode *ARM64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
984 unsigned Opc) {
985 SDLoc dl(N);
986 EVT VT = N->getOperand(2)->getValueType(0);
988 // Form a REG_SEQUENCE to force register allocation.
989 bool Is128Bit = VT.getSizeInBits() == 128;
990 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
991 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
993 SmallVector<SDValue, 6> Ops;
994 Ops.push_back(RegSeq);
995 Ops.push_back(N->getOperand(NumVecs + 2));
996 Ops.push_back(N->getOperand(0));
997 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1002 /// WidenVector - Given a value in the V64 register class, produce the
1003 /// equivalent value in the V128 register class.
1008 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1010 SDValue operator()(SDValue V64Reg) {
1011 EVT VT = V64Reg.getValueType();
1012 unsigned NarrowSize = VT.getVectorNumElements();
1013 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1014 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1016 SDLoc DL(V64Reg);
1017 SDValue Undef =
1018     SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1019 return DAG.getTargetInsertSubreg(ARM64::dsub, DL, WideTy, Undef, V64Reg);
1023 /// NarrowVector - Given a value in the V128 register class, produce the
1024 /// equivalent value in the V64 register class.
1025 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1026 EVT VT = V128Reg.getValueType();
1027 unsigned WideSize = VT.getVectorNumElements();
1028 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1029 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1031 return DAG.getTargetExtractSubreg(ARM64::dsub, SDLoc(V128Reg), NarrowTy,
1032                                   V128Reg);
1035 SDNode *ARM64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1036 unsigned Opc) {
1037 SDLoc dl(N);
1038 EVT VT = N->getValueType(0);
1039 bool Narrow = VT.getSizeInBits() == 64;
1041 // Form a REG_SEQUENCE to force register allocation.
1042 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1044 if (Narrow)
1045 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1046                WidenVector(*CurDAG));
1048 SDValue RegSeq = createQTuple(Regs);
1050 std::vector<EVT> ResTys;
1051 ResTys.push_back(MVT::Untyped);
1052 ResTys.push_back(MVT::Other);
1054 unsigned LaneNo =
1055     cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1057 SmallVector<SDValue, 6> Ops;
1058 Ops.push_back(RegSeq);
1059 Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
1060 Ops.push_back(N->getOperand(NumVecs + 3));
1061 Ops.push_back(N->getOperand(0));
1062 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1063 SDValue SuperReg = SDValue(Ld, 0);
1065 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1069 CurDAG->getTargetExtractSubreg(ARM64::qsub3, dl, WideVT, SuperReg);
1071 ReplaceUses(SDValue(N, 3), NarrowVector(NV3, *CurDAG));
1073 ReplaceUses(SDValue(N, 3), NV3);
1078 CurDAG->getTargetExtractSubreg(ARM64::qsub2, dl, WideVT, SuperReg);
1080 ReplaceUses(SDValue(N, 2), NarrowVector(NV2, *CurDAG));
1082 ReplaceUses(SDValue(N, 2), NV2);
1087 CurDAG->getTargetExtractSubreg(ARM64::qsub1, dl, WideVT, SuperReg);
1089 CurDAG->getTargetExtractSubreg(ARM64::qsub0, dl, WideVT, SuperReg);
1091 ReplaceUses(SDValue(N, 1), NarrowVector(NV1, *CurDAG));
1092 ReplaceUses(SDValue(N, 0), NarrowVector(NV0, *CurDAG));
1094 ReplaceUses(SDValue(N, 1), NV1);
1095 ReplaceUses(SDValue(N, 0), NV0);
1101 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1106 SDNode *ARM64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1107 unsigned Opc) {
1108 SDLoc dl(N);
1109 EVT VT = N->getOperand(2)->getValueType(0);
1110 bool Narrow = VT.getSizeInBits() == 64;
1112 // Form a REG_SEQUENCE to force register allocation.
1113 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1115 if (Narrow)
1116 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1117                WidenVector(*CurDAG));
1119 SDValue RegSeq = createQTuple(Regs);
1121 unsigned LaneNo =
1122     cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1124 SmallVector<SDValue, 6> Ops;
1125 Ops.push_back(RegSeq);
1126 Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
1127 Ops.push_back(N->getOperand(NumVecs + 3));
1128 Ops.push_back(N->getOperand(0));
1129 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1131 // Transfer memoperands.
1132 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1133 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1134 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1139 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1140 unsigned &Opc, SDValue &Opd0,
1141 unsigned &LSB, unsigned &MSB,
1142 unsigned NumberOfIgnoredLowBits,
1143 bool BiggerPattern) {
1144 assert(N->getOpcode() == ISD::AND &&
1145 "N must be a AND operation to call this function");
1147 EVT VT = N->getValueType(0);
1149 // Here we can test the type of VT and return false when the type does not
1150 // match, but since it is done prior to that call in the current context
1151 // we turned that into an assert to avoid redundant code.
1152 assert((VT == MVT::i32 || VT == MVT::i64) &&
1153 "Type checking must have been done before calling this function");
1155 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1156 // changed the AND node to a 32-bit mask operation. We'll have to
1157 // undo that as part of the transform here if we want to catch all
1158 // the opportunities.
1159 // Currently the NumberOfIgnoredLowBits argument helps to recover
1160 // from these situations when matching the bigger pattern (bitfield insert).
1162 // For unsigned extracts, check for a shift right and mask
1163 uint64_t And_imm = 0;
1164 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1167 const SDNode *Op0 = N->getOperand(0).getNode();
1169 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1170 // simplified. Try to undo that
1171 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1173 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1174 if (And_imm & (And_imm + 1))
1177 bool ClampMSB = false;
1178 uint64_t Srl_imm = 0;
1179 // Handle the SRL + ANY_EXTEND case.
1180 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1181 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1182 // Extend the incoming operand of the SRL to 64-bit.
1183 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1184 // Make sure to clamp the MSB so that we preserve the semantics of the
1185 // original operations.
1187 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1188 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1190 // If the shift result was truncated, we can still combine them.
1191 Opd0 = Op0->getOperand(0).getOperand(0);
1193 // Use the type of SRL node.
1194 VT = Opd0->getValueType(0);
1195 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1196 Opd0 = Op0->getOperand(0);
1197 } else if (BiggerPattern) {
1198 // Let's pretend a 0 shift right has been performed.
1199 // The resulting code will be at least as good as the original one
1200 // plus it may expose more opportunities for bitfield insert pattern.
1201 // FIXME: Currently we limit this to the bigger pattern, because
1202 // some optimizations expect AND and not UBFM
1203 Opd0 = N->getOperand(0);
1207 assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
1208 "bad amount in shift node!");
1211 MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
1212 : CountTrailingOnes_64(And_imm)) -
1215 // Since we're moving the extend before the right shift operation, we need
1216 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1217 // the zeros which would get shifted in with the original right shift
1218 // operation.
1219 MSB = MSB > 31 ? 31 : MSB;
1221 Opc = VT == MVT::i32 ? ARM64::UBFMWri : ARM64::UBFMXri;
1225 static bool isOneBitExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1226 unsigned &LSB, unsigned &MSB) {
1227 // We are looking for the following pattern which basically extracts a single
1228 // bit from the source value and places it in the LSB of the destination
1229 // value, all other bits of the destination value are set to zero:
1231 // Value2 = AND Value, MaskImm
1232 // SRL Value2, ShiftImm
1234 // with MaskImm >> ShiftImm == 1.
1236 // This gets selected into a single UBFM:
1238 // UBFM Value, ShiftImm, ShiftImm
1241 if (N->getOpcode() != ISD::SRL)
1244 uint64_t And_mask = 0;
1245 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1248 Opd0 = N->getOperand(0).getOperand(0);
1250 uint64_t Srl_imm = 0;
1251 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1254 // Check whether we really have a one bit extract here.
1255 if (And_mask >> Srl_imm == 0x1) {
1256 if (N->getValueType(0) == MVT::i32)
1257 Opc = ARM64::UBFMWri;
1258 else
1259 Opc = ARM64::UBFMXri;
1261 LSB = MSB = Srl_imm;
1269 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1270 unsigned &LSB, unsigned &MSB,
1271 bool BiggerPattern) {
1272 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1273 "N must be a SHR/SRA operation to call this function");
1275 EVT VT = N->getValueType(0);
1277 // Here we can test the type of VT and return false when the type does not
1278 // match, but since it is done prior to that call in the current context
1279 // we turned that into an assert to avoid redundant code.
1280 assert((VT == MVT::i32 || VT == MVT::i64) &&
1281 "Type checking must have been done before calling this function");
1283 // Check for AND + SRL doing a one bit extract.
1284 if (isOneBitExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
1287 // we're looking for a shift of a shift
1288 uint64_t Shl_imm = 0;
1289 uint64_t Trunc_bits = 0;
1290 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1291 Opd0 = N->getOperand(0).getOperand(0);
1292 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1293 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1294 // We are looking for a shift of truncate. Truncate from i64 to i32 could
1295 // be considered as setting the high 32 bits to zero. Our strategy here is to
1296 // always generate a 64-bit UBFM. This consistency will help the CSE pass
1297 // later find more redundancy.
1298 Opd0 = N->getOperand(0).getOperand(0);
1299 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1300 VT = Opd0->getValueType(0);
1301 assert(VT == MVT::i64 && "the promoted type should be i64");
1302 } else if (BiggerPattern) {
1303 // Let's pretend a 0 shift left has been performed.
1304 // FIXME: Currently we limit this to the bigger pattern case,
1305 // because some optimizations expect AND and not UBFM
1306 Opd0 = N->getOperand(0);
1310 assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
1311 uint64_t Srl_imm = 0;
1312 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1315 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1316 "bad amount in shift node!");
1317 // Note: The width operand is encoded as width-1.
1318 unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
1319 int sLSB = Srl_imm - Shl_imm;
1324 // SRA requires a signed extraction
1325 if (VT == MVT::i32)
1326 Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMWri : ARM64::UBFMWri;
1327 else
1328 Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMXri : ARM64::UBFMXri;
1332 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1333 SDValue &Opd0, unsigned &LSB, unsigned &MSB,
1334 unsigned NumberOfIgnoredLowBits = 0,
1335 bool BiggerPattern = false) {
1336 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1339 switch (N->getOpcode()) {
1341 if (!N->isMachineOpcode())
1345 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
1346 NumberOfIgnoredLowBits, BiggerPattern);
1349 return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
1352 unsigned NOpc = N->getMachineOpcode();
1356 case ARM64::SBFMWri:
1357 case ARM64::UBFMWri:
1358 case ARM64::SBFMXri:
1359 case ARM64::UBFMXri:
1361 Opd0 = N->getOperand(0);
1362 LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1363 MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1370 SDNode *ARM64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1371 unsigned Opc, LSB, MSB;
1372 SDValue Opd0;
1373 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
1376 EVT VT = N->getValueType(0);
1378 // If the bit extract operation is 64bit but the original type is 32bit, we
1379 // need to add one EXTRACT_SUBREG.
1380 if ((Opc == ARM64::SBFMXri || Opc == ARM64::UBFMXri) && VT == MVT::i32) {
1381 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
1382 CurDAG->getTargetConstant(MSB, MVT::i64)};
1384 SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
1385 SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
1386 MachineSDNode *Node =
1387 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
1388 SDValue(BFM, 0), SubReg);
1392 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
1393 CurDAG->getTargetConstant(MSB, VT)};
1394 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 3);
1397 /// Does DstMask form a complementary pair with the mask provided by
1398 /// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
1399 /// this asks whether DstMask zeroes precisely those bits that will be set by
1400 /// the other half.
1401 static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1402 unsigned NumberOfIgnoredHighBits, EVT VT) {
1403 assert((VT == MVT::i32 || VT == MVT::i64) &&
1404 "i32 or i64 mask type expected!");
1405 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1407 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1408 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1410 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1411 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
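// For example (illustrative): DstMask == 0xffff0000 paired with inserted bits
// occupying 0x0000ffff satisfies both checks for a 32-bit type, so the AND
// can be folded away.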
1414 // Look for bits that will be useful for later uses.
1415 // A bit is considered useless as soon as it is dropped and never used
1416 // before it has been dropped.
1417 // E.g., looking for useful bits of x
1418 // 1. y = x & 0x7
1419 // 2. z = y >> 2
1420 // After #1, x useful bits are 0x7, then the useful bits of x, live through
1421 // y are 0x4.
1422 // After #2, the useful bits of x are 0x4.
1423 // However, if x is used on an unpredictable instruction, then all its bits
1424 // are useful.
1429 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1431 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1433 uint64_t Imm =
1434     cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1435 Imm = ARM64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1436 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1437 getUsefulBits(Op, UsefulBits, Depth + 1);
1440 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1441 uint64_t Imm, uint64_t MSB,
1443 // inherit the bitwidth value
1444 APInt OpUsefulBits(UsefulBits);
1448 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1450 // The interesting part will be in the lower part of the result
1451 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1452 // The interesting part was starting at Imm in the argument
1453 OpUsefulBits = OpUsefulBits.shl(Imm);
1455 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1457 // The interesting part will be shifted in the result
1458 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1459 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1460 // The interesting part was at zero in the argument
1461 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1464 UsefulBits &= OpUsefulBits;
1467 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1469 uint64_t Imm =
1470     cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1471 uint64_t MSB =
1472     cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1474 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1477 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1479 uint64_t ShiftTypeAndValue =
1480 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1481 APInt Mask(UsefulBits);
1482 Mask.clearAllBits();
1483 Mask.flipAllBits();
1485 if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSL) {
1487 uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
1488 Mask = Mask.shl(ShiftAmt);
1489 getUsefulBits(Op, Mask, Depth + 1);
1490 Mask = Mask.lshr(ShiftAmt);
1491 } else if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSR) {
1493 // We do not handle ARM64_AM::ASR, because the sign will change the
1494 // number of useful bits
1495 uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
1496 Mask = Mask.lshr(ShiftAmt);
1497 getUsefulBits(Op, Mask, Depth + 1);
1498 Mask = Mask.shl(ShiftAmt);
1505 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1507 uint64_t Imm =
1508     cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1509 uint64_t MSB =
1510     cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1512 if (Op.getOperand(1) == Orig)
1513 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1515 APInt OpUsefulBits(UsefulBits);
1519 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1521 UsefulBits &= ~OpUsefulBits;
1522 getUsefulBits(Op, UsefulBits, Depth + 1);
1524 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1526 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1527 getUsefulBits(Op, UsefulBits, Depth + 1);
1531 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1532 SDValue Orig, unsigned Depth) {
1534 // Users of this node should have already been instruction selected
1535 // FIXME: Can we turn that into an assert?
1536 if (!UserNode->isMachineOpcode())
1539 switch (UserNode->getMachineOpcode()) {
1542 case ARM64::ANDSWri:
1543 case ARM64::ANDSXri:
1546 // We increment Depth only when we call the getUsefulBits
1547 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1549 case ARM64::UBFMWri:
1550 case ARM64::UBFMXri:
1551 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1555 if (UserNode->getOperand(1) != Orig)
1557 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1561 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1565 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1568 // Initialize UsefulBits
1570 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1571 // At the beginning, assume every produced bit is useful.
1572 UsefulBits = APInt(Bitwidth, 0);
1573 UsefulBits.flipAllBits();
1575 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1577 for (SDNode *Node : Op.getNode()->uses()) {
1578 // A use cannot produce useful bits
1579 APInt UsefulBitsForUse = APInt(UsefulBits);
1580 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1581 UsersUsefulBits |= UsefulBitsForUse;
1583 // UsefulBits contains the produced bits that are meaningful for the
1584 // current definition, thus a user cannot make a bit meaningful at
1585 // this point.
1586 UsefulBits &= UsersUsefulBits;
1589 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
1590 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1591 /// 0, return Op unchanged.
1592 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1593 if (ShlAmount == 0)
1594 return Op;
1596 EVT VT = Op.getValueType();
1597 unsigned BitWidth = VT.getSizeInBits();
1598 unsigned UBFMOpc = BitWidth == 32 ? ARM64::UBFMWri : ARM64::UBFMXri;
1600 SDNode *ShiftNode;
1601 if (ShlAmount > 0) {
1602 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
1603 ShiftNode = CurDAG->getMachineNode(
1604 UBFMOpc, SDLoc(Op), VT, Op,
1605 CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
1606 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
1608 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1609 assert(ShlAmount < 0 && "expected right shift");
1610 int ShrAmount = -ShlAmount;
1611 ShiftNode = CurDAG->getMachineNode(
1612 UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
1613 CurDAG->getTargetConstant(BitWidth - 1, VT));
1616 return SDValue(ShiftNode, 0);
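// For example (illustrative): getLeftShift(CurDAG, Op, 5) on an i32 value
// emits UBFMWri with immediates (27, 26), i.e. "lsl w0, w0, #5", while
// ShlAmount == -5 emits immediates (5, 31), i.e. "lsr w0, w0, #5".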
1619 /// Does this tree qualify as an attempt to move a bitfield into position,
1620 /// essentially "(and (shl VAL, N), Mask)".
1621 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1622 SDValue &Src, int &ShiftAmount,
1624 EVT VT = Op.getValueType();
1625 unsigned BitWidth = VT.getSizeInBits();
1627 assert(BitWidth == 32 || BitWidth == 64);
1629 APInt KnownZero, KnownOne;
1630 CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
1632 // Non-zero in the sense that they're not provably zero, which is the key
1633 // point if we want to use this value
1634 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1636 // Discard a constant AND mask if present. It's safe because the node will
1637 // already have been factored into the ComputeMaskedBits calculation above.
1638 uint64_t AndImm;
1639 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1640 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1641 Op = Op.getOperand(0);
1642 }
1644 uint64_t ShlImm;
1645 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1646 return false;
1647 Op = Op.getOperand(0);
1649 if (!isShiftedMask_64(NonZeroBits))
1652 ShiftAmount = countTrailingZeros(NonZeroBits);
1653 MaskWidth = CountTrailingOnes_64(NonZeroBits >> ShiftAmount);
1655 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1656 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1658 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
1663 // Given an OR operation, check if we have the following pattern:
1664 // ubfm c, b, imm, imm2 (or something that does the same job, see
1665 // isBitfieldExtractOp)
1666 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1667 //                 countTrailingZeros(mask2) == imm2 - imm + 1
1668 // f = d | c
1669 // If yes, the given reference arguments will be updated so that one can
1670 // replace the OR instruction with:
1671 // f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
1672 static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1673 SDValue &Src, unsigned &ImmR,
1674 unsigned &ImmS, SelectionDAG *CurDAG) {
1675 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
1678 EVT VT = N->getValueType(0);
1679 if (VT == MVT::i32)
1680 Opc = ARM64::BFMWri;
1681 else if (VT == MVT::i64)
1682 Opc = ARM64::BFMXri;
1683 else
1684 return false;
1686 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1687 // have the expected shape. Try to undo that.
1688 APInt UsefulBits;
1689 getUsefulBits(SDValue(N, 0), UsefulBits);
1691 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1692 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1694 // OR is commutative, check both possibilities (does llvm provide a
1695 // way to do that directly, e.g., via code matcher?)
1696 SDValue OrOpd1Val = N->getOperand(1);
1697 SDNode *OrOpd0 = N->getOperand(0).getNode();
1698 SDNode *OrOpd1 = N->getOperand(1).getNode();
1699 for (int i = 0; i < 2;
1700 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
1701 unsigned BFXOpc;
1702 int DstLSB, Width;
1703 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
1704 NumberOfIgnoredLowBits, true)) {
1705 // Check that the returned opcode is compatible with the pattern,
1706 // i.e., same type and zero extended (U and not S)
1707 if ((BFXOpc != ARM64::UBFMXri && VT == MVT::i64) ||
1708 (BFXOpc != ARM64::UBFMWri && VT == MVT::i32))
1711 // Compute the width of the bitfield insertion
1712 DstLSB = 0;
1713 Width = ImmS - ImmR + 1;
1714 // FIXME: This constraint is to catch bitfield insertion; we may
1715 // want to widen the pattern to grab the general bitfield move case.
1720 // If the mask on the insertee is correct, we have a BFXIL operation. We
1721 // can share the ImmR and ImmS values from the already-computed UBFM.
1722 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
1724 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
1729 // Check the second part of the pattern
1730 EVT VT = OrOpd1->getValueType(0);
1731 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
1733 // Compute the Known Zero for the candidate of the first operand.
1734 // This allows us to catch more general cases than just looking for
1735 // an AND with an immediate. Indeed, simplify-demanded-bits may have removed
1736 // the AND instruction because it proves it was useless.
1737 APInt KnownZero, KnownOne;
1738 CurDAG->ComputeMaskedBits(OrOpd1Val, KnownZero, KnownOne);
1740 // Check if there is enough room for the second operand to appear
1741 // in the first one.
1742 APInt BitsToBeInserted =
1743 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
1745 if ((BitsToBeInserted & ~KnownZero) != 0)
1748 // Set the first operand
1750 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
1751 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
1752 // In that case, we can eliminate the AND
1753 Dst = OrOpd1->getOperand(0);
1755 // Maybe the AND has been removed by simplify-demanded-bits
1756 // or is useful because it discards more bits
1766 SDNode *ARM64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
1767 if (N->getOpcode() != ISD::OR)
1774 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
1777 EVT VT = N->getValueType(0);
1778 SDValue Ops[] = { Opd0,
1779                   Opd1,
1780 CurDAG->getTargetConstant(LSB, VT),
1781 CurDAG->getTargetConstant(MSB, VT) };
1782 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 4);
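// For example (illustrative): an OR that combines (and x0, 0xffffffffffff0000)
// with a 16-bit field taken from x1 can select to a single
// "bfxil x0, x1, #0, #16" instead of separate shift/and/or nodes.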
1785 SDNode *ARM64DAGToDAGISel::SelectLIBM(SDNode *N) {
1786 EVT VT = N->getValueType(0);
1787 unsigned Variant;
1788 unsigned Opc;
1789 unsigned FRINTXOpcs[] = { ARM64::FRINTXSr, ARM64::FRINTXDr };
1791 if (VT == MVT::f32) {
1792 Variant = 0;
1793 } else if (VT == MVT::f64) {
1794 Variant = 1;
1795 } else
1796 return nullptr; // Unrecognized argument type. Fall back on default codegen.
1798 // Pick the FRINTX variant needed to set the flags.
1799 unsigned FRINTXOpc = FRINTXOpcs[Variant];

  switch (N->getOpcode()) {
  default:
    return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
  case ISD::FCEIL: {
    unsigned FRINTPOpcs[] = { ARM64::FRINTPSr, ARM64::FRINTPDr };
    Opc = FRINTPOpcs[Variant];
    break;
  }
  case ISD::FFLOOR: {
    unsigned FRINTMOpcs[] = { ARM64::FRINTMSr, ARM64::FRINTMDr };
    Opc = FRINTMOpcs[Variant];
    break;
  }
  case ISD::FTRUNC: {
    unsigned FRINTZOpcs[] = { ARM64::FRINTZSr, ARM64::FRINTZDr };
    Opc = FRINTZOpcs[Variant];
    break;
  }
  case ISD::FROUND: {
    unsigned FRINTAOpcs[] = { ARM64::FRINTASr, ARM64::FRINTADr };
    Opc = FRINTAOpcs[Variant];
    break;
  }
  }

  SDLoc dl(N);
  SDValue In = N->getOperand(0);
  SmallVector<SDValue, 2> Ops;
  Ops.push_back(In);

  if (!TM.Options.UnsafeFPMath) {
    SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
    Ops.push_back(SDValue(FRINTX, 1));
  }
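  // For example (illustrative, not from the original source): with
  // UnsafeFPMath off, (f32 (fceil x)) selects to FRINTXSr x, whose only role
  // is to raise any inexact exception as a libm call would, glued to
  // FRINTPSr x, which produces the actual result.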

  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}

bool
ARM64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                            unsigned RegWidth) {
  APFloat FVal(0.0);
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != ARM64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  } else
    return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
  bool IsExact;

  // fbits is between 1 and 64 in the worst case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2())
    return false;
  unsigned FBits = IntVal.logBase2();
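  // Worked example (illustrative, not from the original source): for
  // (fp_to_sint (fmul x, 16.0)) with RegWidth == 32, FVal == 16.0 converts
  // exactly to IntVal == 16, a power of two, so FBits == 4 and the multiply
  // can fold into a fixed-point convert with 4 fractional bits.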

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth)
    return false;

  FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
  return true;
}

SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected.
  DEBUG(errs() << "Selecting: ");
  DEBUG(Node->dump(CurDAG));
  DEBUG(errs() << "\n");

  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return nullptr;
  }

  // A few custom selection cases.
  SDNode *ResNode = nullptr;
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {
  default:
    break;

  case ISD::ADD:
    if (SDNode *I = SelectMLAV64LaneV128(Node))
      return I;
    break;

  case ISD::LOAD: {
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    bool Done = false;
    SDNode *I = SelectIndexedLoad(Node, Done);
    if (Done)
      return I;
    break;
  }

  case ISD::SRL:
  case ISD::AND:
  case ISD::SRA:
    if (SDNode *I = SelectBitfieldExtractOp(Node))
      return I;
    break;

  case ISD::OR:
    if (SDNode *I = SelectBitfieldInsertOp(Node))
      return I;
    break;

  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // do so.
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
      break;

    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
      break;

    unsigned SubReg;
    switch (Node->getOperand(0)
                .getValueType()
                .getVectorElementType()
                .getSizeInBits()) {
    default:
      assert(0 && "Unexpected vector element type!");
    case 64:
      SubReg = ARM64::dsub;
      break;
    case 32:
      SubReg = ARM64::ssub;
      break;
    case 16: // FALLTHROUGH
    case 8:
      llvm_unreachable("unexpected zext-requiring extract element!");
    }
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    DEBUG(Extract->dumpr(CurDAG));
    DEBUG(dbgs() << "\n");
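    // For example (illustrative, not from the original source):
    //   (f64 (extract_vector_elt (v2f64 V), 0))
    // becomes EXTRACT_SUBREG V, dsub: a plain subregister copy of the low 64
    // bits rather than a lane-indexed move.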
    return Extract.getNode();
  }
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
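    // For instance (illustrative, not from the original source), an i64 zero
    // feeding a store can be selected as a copy from XZR, which the coalescer
    // can then fold so the store reads XZR directly instead of a separately
    // materialized immediate.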
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::WZR, MVT::i32).getNode();
      else if (VT == MVT::i64)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::XZR, MVT::i64).getNode();
    }
    break;
  }

  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
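    // E.g. (illustrative, not from the original source): a frame index that
    // frame-index elimination later resolves to SP+16 is selected here as
    // ADDXri FI, 0, LSL #0 and then rewritten to ADDXri SP, 16, LSL #0.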
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = ARM64_AM::getShifterImm(ARM64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, MVT::i32) };
    return CurDAG->SelectNodeTo(Node, ARM64::ADDXri, MVT::i64, Ops, 3);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_ldaxp:
    case Intrinsic::arm64_ldxp: {
      unsigned Op =
          IntNo == Intrinsic::arm64_ldaxp ? ARM64::LDAXPX : ARM64::LDXPX;
      SDValue MemAddr = Node->getOperand(2);
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
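      // Note (explanatory comment, not in the original source): transferring
      // the intrinsic's MachineMemOperand onto the machine node keeps this
      // memory access visible to later passes such as scheduling and alias
      // analysis.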
      return Ld;
    }
    case Intrinsic::arm64_stlxp:
    case Intrinsic::arm64_stxp: {
      unsigned Op =
          IntNo == Intrinsic::arm64_stlxp ? ARM64::STLXPX : ARM64::STXPX;
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SmallVector<SDValue, 7> Ops;
      Ops.push_back(ValLo);
      Ops.push_back(ValHi);
      Ops.push_back(MemAddr);
      Ops.push_back(Chain);

      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
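      // Note (explanatory comment, not in the original source): STXP/STLXP
      // also define an i32 status result (0 if the store-exclusive
      // succeeded), which is why the machine node's first result type is
      // MVT::i32.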
      return St;
    }
    case Intrinsic::arm64_neon_ld1x2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD1Twov8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD1Twov16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD1Twov4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD1Twov8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD1Twov2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD1Twov4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov2d, ARM64::qsub0);
      break;
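    // Note (summary comment, not in the original source): the arm64_neon_ld*
    // cases here and below all follow one scheme: pick the instruction
    // variant matching the vector layout, then let SelectLoad build the
    // machine node and extract the individual results from its register
    // tuple via consecutive subregisters starting at dsub0 (64-bit vectors)
    // or qsub0 (128-bit vectors).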
    case Intrinsic::arm64_neon_ld1x3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD1Threev8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD1Threev16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD1Threev4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD1Threev8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD1Threev2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD1Threev4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld1x4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD1Fourv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD1Fourv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD1Fourv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD1Fourv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD1Fourv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD1Fourv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD2Twov8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD2Twov16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD2Twov4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD2Twov8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD2Twov2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD2Twov4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD2Twov2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD3Threev8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD3Threev16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD3Threev4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD3Threev8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD3Threev2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD3Threev4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD3Threev2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD4Fourv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD4Fourv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD4Fourv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD4Fourv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD4Fourv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD4Fourv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD4Fourv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD2Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD2Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD2Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD2Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD2Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD2Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD2Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD2Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld3r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD3Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD3Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD3Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD3Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD3Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD3Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD3Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD3Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld4r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD4Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD4Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD4Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD4Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD4Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD4Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD4Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD4Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 2, ARM64::LD2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 2, ARM64::LD2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 2, ARM64::LD2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 2, ARM64::LD2i64);
      break;
    case Intrinsic::arm64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 3, ARM64::LD3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 3, ARM64::LD3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 3, ARM64::LD3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 3, ARM64::LD3i64);
      break;
    case Intrinsic::arm64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 4, ARM64::LD4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 4, ARM64::LD4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 4, ARM64::LD4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 4, ARM64::LD4i64);
      break;
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_neon_tbl2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBLv8i8Two
                                                  : ARM64::TBLv16i8Two,
                         false);
    case Intrinsic::arm64_neon_tbl3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBLv8i8Three
                                                  : ARM64::TBLv16i8Three,
                         false);
    case Intrinsic::arm64_neon_tbl4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBLv8i8Four
                                                  : ARM64::TBLv16i8Four,
                         false);
    case Intrinsic::arm64_neon_tbx2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBXv8i8Two
                                                  : ARM64::TBXv16i8Two,
                         true);
    case Intrinsic::arm64_neon_tbx3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBXv8i8Three
                                                  : ARM64::TBXv16i8Three,
                         true);
    case Intrinsic::arm64_neon_tbx4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBXv8i8Four
                                                  : ARM64::TBXv16i8Four,
                         true);
    case Intrinsic::arm64_neon_smull:
    case Intrinsic::arm64_neon_umull:
      if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
        return N;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
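    // Note (explanatory comment, not in the original source): these store
    // intrinsics return void, so the node itself has no useful result type;
    // the dispatch below keys on the type of the first stored value
    // (operand 2) instead.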
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_neon_st1x2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, ARM64::ST1Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, ARM64::ST1Twov16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 2, ARM64::ST1Twov4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 2, ARM64::ST1Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, ARM64::ST1Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, ARM64::ST1Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, ARM64::ST1Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, ARM64::ST1Twov1d);
      break;
    }
    case Intrinsic::arm64_neon_st1x3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, ARM64::ST1Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, ARM64::ST1Threev16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 3, ARM64::ST1Threev4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 3, ARM64::ST1Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, ARM64::ST1Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, ARM64::ST1Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, ARM64::ST1Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, ARM64::ST1Threev1d);
      break;
    }
    case Intrinsic::arm64_neon_st1x4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, ARM64::ST1Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, ARM64::ST1Fourv16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 4, ARM64::ST1Fourv4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 4, ARM64::ST1Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, ARM64::ST1Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, ARM64::ST1Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv1d);
      break;
    }
    case Intrinsic::arm64_neon_st2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, ARM64::ST2Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, ARM64::ST2Twov16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 2, ARM64::ST2Twov4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 2, ARM64::ST2Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, ARM64::ST2Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, ARM64::ST2Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, ARM64::ST2Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, ARM64::ST1Twov1d);
      break;
    }
    case Intrinsic::arm64_neon_st3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, ARM64::ST3Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, ARM64::ST3Threev16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 3, ARM64::ST3Threev4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 3, ARM64::ST3Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, ARM64::ST3Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, ARM64::ST3Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, ARM64::ST3Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, ARM64::ST1Threev1d);
      break;
    }
    case Intrinsic::arm64_neon_st4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, ARM64::ST4Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, ARM64::ST4Fourv16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 4, ARM64::ST4Fourv4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 4, ARM64::ST4Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, ARM64::ST4Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, ARM64::ST4Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, ARM64::ST4Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv1d);
      break;
    }
    case Intrinsic::arm64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 2, ARM64::ST2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 2, ARM64::ST2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 2, ARM64::ST2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 2, ARM64::ST2i64);
      break;
    }
    case Intrinsic::arm64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 3, ARM64::ST3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 3, ARM64::ST3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 3, ARM64::ST3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 3, ARM64::ST3i64);
      break;
    }
    case Intrinsic::arm64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 4, ARM64::ST4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 4, ARM64::ST4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 4, ARM64::ST4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 4, ARM64::ST4i64);
      break;
    }
    }
    break;
  }
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;
  }

  // Select the default instruction.
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createARM64ISelDag - This pass converts a legalized DAG into an
/// ARM64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createARM64ISelDag(ARM64TargetMachine &TM,
                                       CodeGenOpt::Level OptLevel) {
  return new ARM64DAGToDAGISel(TM, OptLevel);
}