//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"
//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
        ForCodeSize(false) {}

  const char *getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize = MF.getFunction()->optForSize();
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  SDNode *Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;
  SDNode *SelectMLAV64LaneV128(SDNode *N);
  SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }
  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }
  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  SDNode *SelectIndexedLoad(SDNode *N, bool &Done);

  SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                     unsigned SubRegIdx);
  SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                         unsigned SubRegIdx);
  SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectBitfieldExtractOp(SDNode *N);
  SDNode *SelectBitfieldInsertOp(SDNode *N);
  SDNode *SelectBitfieldInsertInZeroOp(SDNode *N);

  SDNode *SelectLIBM(SDNode *N);
  SDNode *SelectFPConvertWithRound(SDNode *N);

  SDNode *SelectReadRegister(SDNode *N);
  SDNode *SelectWriteRegister(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"
private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  SDNode *GenerateInexactFlagIfNeeded(const SDValue &In, unsigned InTyVariant,
};
} // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}
bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // Require the address to be in a register. That is safe for all AArch64
    // variants and it is hard to do anything much smarter without knowing
    // how the operand is used.
    OutOps.push_back(Op);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;
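  // For example, 0x123 is accepted as-is (LSL #0) and 0x123000 as 0x123 with
  // LSL #12, while something like 0x123456 cannot be encoded and is rejected.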
  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}
/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match
  // under those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}
/// \brief Determine whether it is worthwhile to fold V into an extended
/// register operand.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if the value is used at least twice, unless we are optimizing
  // for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}
/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// allowed.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}
// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
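  // The lane in the original wide source vector is the DUPLANE index offset
  // by the position at which the extracted subvector starts.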
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }

  StdOp = Op1;
  return true;
}
/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return nullptr;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
}
SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return nullptr;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
}
/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}
/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point
/// in folding it into the mem op. Theoretically, it shouldn't matter, but
/// there's a single pseudo-instruction for an ADRP/ADD pair so over-aggressive
/// folding leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
      return false;
  }

  return true;
}
/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
                                                  SDValue &Base,
                                                  SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed
  // variant selected here doesn't support labels/immediates, only base+offset.

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = RHS->getSExtValue();
      unsigned Scale = Log2_32(Size);
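      // The scaled signed 7-bit form accepts offsets that are multiples of
      // Size in [-64 * Size, 63 * Size]; e.g. -512..504 in steps of 8 for
      // 8-byte accesses.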
      if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
          RHSC < (0x40 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    Type *Ty = GV->getType()->getElementType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL.getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
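      // The scaled unsigned 12-bit form accepts offsets that are multiples of
      // Size in [0, 4095 * Size]; e.g. 0..32760 for 8-byte accesses.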
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}
/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}
/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  if (isWorthFolding(N))
    return true;

  return false;
}
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better
  // lowered to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}
// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or can be encoded in an "ADD LSL #12" but cannot be
// encoded by a single MOVZ, return true.
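// For example, 0x123000 is preferred (a single "ADD #0x123, LSL #12" instead
// of materializing the constant separately), while 0x110000 is not, since it
// already fits a single "MOVZ #0x11, LSL #16".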
static bool isPreferredADD(int64_t ImmOff) {
  // Constant in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constant.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}
bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out if RHS is a wide immediate; it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an
  // ADD/SUB either. Instead it would use the [BaseReg + 0] address mode and
  // generate instructions like:
  //     MOV  X0, WideImmediate
  //     ADD  X1, BaseReg, X0
  //     LDR  X2, [X1, 0]
  // For such a situation, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //     MOV  X0, WideImmediate
  //     LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip immediates that can be selected in the load/store addressing mode.
    // Also skip immediates that can be encoded by a single ADD (SUB is also
    // checked by using -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }
  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}
SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's
  // just a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);
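  // A list of N registers selects RegClassIDs[N - 2]; e.g. three D registers
  // become a REG_SEQUENCE in the DDD register class using dsub0..dsub2.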
  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return nullptr;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return nullptr;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));

  return nullptr;
}
SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
                                        unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand;
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  return nullptr;
}
SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                            unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT,
                                                 SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  return nullptr;
}
SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  return St;
}
SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  return St;
}
namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
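    // e.g. a v2i32 value is widened to v4i32; the original vector ends up in
    // the low 64 bits (dsub) of an otherwise undefined Q register.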
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};
} // namespace

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}
SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                            unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                              AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return nullptr;
}
SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                                unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        RegSeq->getValueType(0), MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, dl,
                                             MVT::i64),  // Lane Number
                   N->getOperand(NumVecs + 2),           // Base register
                   N->getOperand(NumVecs + 3),           // Incremental
                   N->getOperand(0)};                    // Chain
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
                                AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));

  return nullptr;
}
SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}
SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                                 unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 2), // Base Register
                   N->getOperand(NumVecs + 3), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be an AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t And_imm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that
  And_imm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (And_imm & (And_imm + 1))
    return false;
  bool ClampMSB = false;
  uint64_t Srl_imm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   Srl_imm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Bail out on large immediates. This happens when no proper
  // combining/constant folding was performed.
  if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  LSB = Srl_imm;
  MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
                                  : countTrailingOnes<uint64_t>(And_imm)) -
        1;
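  // For example, (x >> 5) & 0xff yields LSB = 5 and MSB = 12, i.e.
  // UBFM x, #5, #12: an 8-bit unsigned field extract starting at bit 5.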
  // Since we're moving the extend before the right shift operation, we need
  // to clamp the MSB to make sure we don't shift in undefined bits instead of
  // the zeros which would get shifted in with the original right shift
  // operation.
  if (ClampMSB)
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
  return true;
}
static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                          SDValue &Opd0, unsigned &LSB,
                                          unsigned &MSB) {
  // We are looking for the following pattern which basically extracts several
  // contiguous bits from the source value and places them at the LSB of the
  // destination value; all other bits of the destination value are set to
  // zero:
  //
  //   Value2 = AND Value, MaskImm
  //   SRL Value2, ShiftImm
  //
  // where MaskImm >> ShiftImm determines the bit width.
  //
  // This gets selected into a single UBFM:
  //
  //   UBFM Value, ShiftImm, BitWide + Srl_imm - 1
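  //
  // For example, (Value & 0xff0) >> 4 becomes UBFM Value, #4, #11, which
  // extracts an 8-bit field starting at bit 4.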
  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t And_mask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  // Check whether we really have several bits extract here.
  unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
  if (BitWide && isMask_64(And_mask >> Srl_imm)) {
    if (N->getValueType(0) == MVT::i32)
      Opc = AArch64::UBFMWri;
    else
      Opc = AArch64::UBFMXri;

    LSB = Srl_imm;
    MSB = BitWide + Srl_imm - 1;
    return true;
  }

  return false;
}
static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &Immr, unsigned &Imms,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing several bits extract.
  if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
    return true;

  // We're looking for a shift of a shift.
  uint64_t Shl_imm = 0;
  uint64_t Trunc_bits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of truncate. Truncate from i64 to i32 could
    // be considered as setting high 32 bits as zero. Our strategy here is to
    // always generate 64-bit UBFM. This consistency will help the CSE pass
    // later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0->getValueType(0);
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Missing combines/constant folding may have left us with strange
  // constants.
  if (Shl_imm >= VT.getSizeInBits()) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  int immr = Srl_imm - Shl_imm;
  Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
  Imms = VT.getSizeInBits() - Shl_imm - Trunc_bits - 1;
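  // For example, for i32 (srl (shl x, 3), 7) this yields Immr = 4 and
  // Imms = 28, i.e. UBFM x, #4, #28 extracts bits [4, 28] of x.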
  // SRA requires a signed extraction.
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
  return true;
}
static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &Immr, unsigned &Imms,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case AArch64::SBFMWri:
  case AArch64::UBFMWri:
  case AArch64::SBFMXri:
  case AArch64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }
  // Unreachable
  return false;
}
SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
  unsigned Opc, Immr, Imms;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
    return nullptr;

  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // If the bit extract operation is 64-bit but the original type is 32-bit,
  // we need to add one EXTRACT_SUBREG.
  if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
                       CurDAG->getTargetConstant(Imms, dl, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    MachineSDNode *Node =
        CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
                               SDValue(BFM, 0), SubReg);
    return Node;
  }

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
                   CurDAG->getTargetConstant(Imms, dl, VT)};
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
}
// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// again before being dropped.
// E.g., looking for the useful bits of x:
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, x useful bits are 0x7, then the useful bits of x, live through
// y.
// After #2, the useful bits of x are 0x4.
// However, if x is used on an unpredictable instruction, then all its bits
// are useful.
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}
1739 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1740 uint64_t Imm, uint64_t MSB,
1742 // inherit the bitwidth value
1743 APInt OpUsefulBits(UsefulBits);
1747 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1749 // The interesting part will be in the lower part of the result
1750 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1751 // The interesting part was starting at Imm in the argument
1752 OpUsefulBits = OpUsefulBits.shl(Imm);
1754 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1756 // The interesting part will be shifted in the result
1757 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1758 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1759 // The interesting part was at zero in the argument
1760 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1763 UsefulBits &= OpUsefulBits;
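// Worked example (illustrative): for a 32-bit UBFM with Imm (Immr) == 4 and
// MSB (Imms) == 7, i.e. a UBFX extracting bits [7:4], the mask computed above
// ends up as 0xF shifted left by Imm, so bits 0xF0 of the operand are recorded
// as useful before intersecting with the caller's UsefulBits.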
1766 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1769 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1771 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1773 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1776 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1778 uint64_t ShiftTypeAndValue =
1779 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1780 APInt Mask(UsefulBits);
1781 Mask.clearAllBits();
1784 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1786 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1787 Mask = Mask.shl(ShiftAmt);
1788 getUsefulBits(Op, Mask, Depth + 1);
1789 Mask = Mask.lshr(ShiftAmt);
1790 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1792 // We do not handle AArch64_AM::ASR, because the sign will change the
1793 // number of useful bits
1794 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1795 Mask = Mask.lshr(ShiftAmt);
1796 getUsefulBits(Op, Mask, Depth + 1);
1797 Mask = Mask.shl(ShiftAmt);
1804 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1807 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1809 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1811 if (Op.getOperand(1) == Orig)
1812 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1814 APInt OpUsefulBits(UsefulBits);
1818 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1820 UsefulBits &= ~OpUsefulBits;
1821 getUsefulBits(Op, UsefulBits, Depth + 1);
1823 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1825 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1826 getUsefulBits(Op, UsefulBits, Depth + 1);
1830 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1831 SDValue Orig, unsigned Depth) {
1833 // Users of this node should have already been instruction selected
1834 // FIXME: Can we turn that into an assert?
1835 if (!UserNode->isMachineOpcode())
1838 switch (UserNode->getMachineOpcode()) {
1841 case AArch64::ANDSWri:
1842 case AArch64::ANDSXri:
1843 case AArch64::ANDWri:
1844 case AArch64::ANDXri:
    // We increment Depth only when we call getUsefulBits itself.
1846 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1848 case AArch64::UBFMWri:
1849 case AArch64::UBFMXri:
1850 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1852 case AArch64::ORRWrs:
1853 case AArch64::ORRXrs:
1854 if (UserNode->getOperand(1) != Orig)
1856 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1858 case AArch64::BFMWri:
1859 case AArch64::BFMXri:
1860 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1864 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1867 // Initialize UsefulBits
1869 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
  // At the beginning, assume every produced bit is useful.
1871 UsefulBits = APInt(Bitwidth, 0);
1872 UsefulBits.flipAllBits();
1874 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1876 for (SDNode *Node : Op.getNode()->uses()) {
1877 // A use cannot produce useful bits
1878 APInt UsefulBitsForUse = APInt(UsefulBits);
1879 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1880 UsersUsefulBits |= UsefulBitsForUse;
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point.
1885 UsefulBits &= UsersUsefulBits;
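  // For instance (illustrative), if the only user of Op is a 32-bit AND with
  // immediate 0xFF, UsefulBits collapses to 0xFF here, and later rewrites of
  // the producer of Op may freely clobber the upper 24 bits.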
1888 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
1889 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1890 /// 0, return Op unchanged.
1891 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1895 EVT VT = Op.getValueType();
1897 unsigned BitWidth = VT.getSizeInBits();
1898 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1901 if (ShlAmount > 0) {
1902 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
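    // E.g. (an illustrative sketch, not tied to a particular caller): on a
    // 32-bit value, a notional LSL #3 is encoded here as UBFM #29, #28,
    // while the LSR branch below would encode LSR #3 as UBFM #3, #31.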
1903 ShiftNode = CurDAG->getMachineNode(
1904 UBFMOpc, dl, VT, Op,
1905 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
1906 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
1908 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1909 assert(ShlAmount < 0 && "expected right shift");
1910 int ShrAmount = -ShlAmount;
1911 ShiftNode = CurDAG->getMachineNode(
1912 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
1913 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
1916 return SDValue(ShiftNode, 0);
1919 /// Does this tree qualify as an attempt to move a bitfield into position,
1920 /// essentially "(and (shl VAL, N), Mask)".
1921 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1923 SDValue &Src, int &ShiftAmount,
1925 EVT VT = Op.getValueType();
1926 unsigned BitWidth = VT.getSizeInBits();
1928 assert(BitWidth == 32 || BitWidth == 64);
1930 APInt KnownZero, KnownOne;
1931 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1933 // Non-zero in the sense that they're not provably zero, which is the key
1934 // point if we want to use this value
1935 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1937 // Discard a constant AND mask if present. It's safe because the node will
1938 // already have been factored into the computeKnownBits calculation above.
1940 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1941 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1942 Op = Op.getOperand(0);
1945 // Don't match if the SHL has more than one use, since then we'll end up
1946 // generating SHL+UBFIZ instead of just keeping SHL+AND.
1947 if (!BiggerPattern && !Op.hasOneUse())
1951 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1953 Op = Op.getOperand(0);
1955 if (!isShiftedMask_64(NonZeroBits))
1958 ShiftAmount = countTrailingZeros(NonZeroBits);
1959 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
1961 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1962 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1963 // amount. BiggerPattern is true when this pattern is being matched for BFI,
1964 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
1965 // which case it is not profitable to insert an extra shift.
1966 if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
1968 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
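  // Worked example (illustrative): for (and (shl x, 3), 0xF8) on i32, the
  // known-zero analysis gives NonZeroBits == 0xF8, so ShiftAmount == 3 and
  // MaskWidth == 5; ShlImm matches ShiftAmount, so Src is x unchanged and the
  // caller can form UBFIZ Wd, Wn, #3, #5.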
// Given an OR operation, check if we have the following pattern
// ubfm c, b, imm, imm2 (or something that does the same job, see
// isBitfieldExtractOp)
// d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
//                 countTrailingZeros(mask2) == imm2 - imm + 1
// f = d | c
// if yes, given reference arguments will be updated so that one can replace
// the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
1982 static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1983 SDValue &Src, unsigned &ImmR,
1984 unsigned &ImmS, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
  EVT VT = N->getValueType(0);
  if (VT == MVT::i32)
    Opc = AArch64::BFMWri;
  else if (VT == MVT::i64)
    Opc = AArch64::BFMXri;
  else
    return false;
1996 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1997 // have the expected shape. Try to undo that.
1999 getUsefulBits(SDValue(N, 0), UsefulBits);
2001 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
2002 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
2004 // OR is commutative, check all combinations of operand order and values of
2005 // BiggerPattern, i.e.
2006 // Opd0, Opd1, BiggerPattern=false
2007 // Opd1, Opd0, BiggerPattern=false
2008 // Opd0, Opd1, BiggerPattern=true
2009 // Opd1, Opd0, BiggerPattern=true
2010 // Several of these combinations may match, so check with BiggerPattern=false
2011 // first since that will produce better results by matching more instructions
2012 // and/or inserting fewer extra instructions.
2013 for (int I = 0; I < 4; ++I) {
2015 bool BiggerPattern = I / 2;
2016 SDNode *OrOpd0 = N->getOperand(I % 2).getNode();
2017 SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
2018 SDNode *OrOpd1 = OrOpd1Val.getNode();
2022 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
2023 NumberOfIgnoredLowBits, BiggerPattern)) {
2024 // Check that the returned opcode is compatible with the pattern,
2025 // i.e., same type and zero extended (U and not S)
2026 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
2027 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
      // Compute the width of the bitfield insertion
      DstLSB = 0;
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint is to catch bitfield insertion; we may
      // want to widen the pattern if we want to grab general bitfield
      // insertion.
      if (Width <= 0)
        continue;
2039 // If the mask on the insertee is correct, we have a BFXIL operation. We
2040 // can share the ImmR and ImmS values from the already-computed UBFM.
2041 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0),
2043 Src, DstLSB, Width)) {
2044 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2049 // Check the second part of the pattern
2050 EVT VT = OrOpd1->getValueType(0);
2051 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
2053 // Compute the Known Zero for the candidate of the first operand.
      // This allows us to catch more general cases than just looking for
      // AND with imm. Indeed, simplify-demanded-bits may have removed
      // the AND instruction because it proved it was useless.
2057 APInt KnownZero, KnownOne;
2058 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
      // Check if there is enough room for the second operand to appear
      // in the first one.
2062 APInt BitsToBeInserted =
2063 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
2065 if ((BitsToBeInserted & ~KnownZero) != 0)
2068 // Set the first operand
2070 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
2071 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
2072 // In that case, we can eliminate the AND
2073 Dst = OrOpd1->getOperand(0);
2075 // Maybe the AND has been removed by simplify-demanded-bits
2076 // or is useful because it discards more bits
2086 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
2087 if (N->getOpcode() != ISD::OR)
2094 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
2099 SDValue Ops[] = { Opd0,
2101 CurDAG->getTargetConstant(LSB, dl, VT),
2102 CurDAG->getTargetConstant(MSB, dl, VT) };
2103 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
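// Worked example (illustrative): for
//   (or (and %a, 0xFFFF00FF), (and (shl %b, 8), 0xFF00)) : i32
// the positioning operand yields DstLSB == 8 and Width == 8, so LSB (ImmR)
// becomes (32 - 8) % 32 == 24 and MSB (ImmS) becomes 7; the selected BFMWri
// is what the assembler prints as BFI w_a, w_b, #8, #8.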
2106 /// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
2107 /// equivalent of a left shift by a constant amount followed by an and masking
2108 /// out a contiguous set of bits.
2109 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertInZeroOp(SDNode *N) {
2110 if (N->getOpcode() != ISD::AND)
  EVT VT = N->getValueType(0);
  unsigned Opc;
  if (VT == MVT::i32)
    Opc = AArch64::UBFMWri;
  else if (VT == MVT::i64)
    Opc = AArch64::UBFMXri;
  else
    return nullptr;
2124 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
2125 Op0, DstLSB, Width))
2128 // ImmR is the rotate right amount.
2129 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2130 // ImmS is the most significant bit of the source to be moved.
  unsigned ImmS = Width - 1;
  SDLoc DL(N);
2134 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
2135 CurDAG->getTargetConstant(ImmS, DL, VT)};
2136 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
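// Worked example (illustrative): (and (shl %x, 4), 0xFF0) : i32 gives
// DstLSB == 4 and Width == 8, hence ImmR == 28 and ImmS == 7; the resulting
// UBFMWri is the same encoding as UBFIZ w_d, w_x, #4, #8.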
2139 /// GenerateInexactFlagIfNeeded - Insert FRINTX instruction to generate inexact
2140 /// signal on round-to-integer operations if needed. C11 leaves it
2141 /// implementation-defined whether these operations trigger an inexact
/// exception. IEEE says they don't. Unfortunately, Darwin decided they do, so
/// we sometimes have to insert a special instruction just to set the right bit
/// in FPSR.
2145 SDNode *AArch64DAGToDAGISel::GenerateInexactFlagIfNeeded(const SDValue &In,
2146 unsigned InTyVariant,
2148 if (Subtarget->isTargetDarwin() && !TM.Options.UnsafeFPMath) {
2149 // Pick the right FRINTX using InTyVariant needed to set the flags.
2150 // InTyVariant is 0 for 32-bit and 1 for 64-bit.
2151 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
2152 return CurDAG->getMachineNode(FRINTXOpcs[InTyVariant], DL,
2153 In.getValueType(), MVT::Glue, In);
2158 SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
2159 EVT VT = N->getValueType(0);
2163 if (VT == MVT::f32) {
2165 } else if (VT == MVT::f64) {
2168 return nullptr; // Unrecognized argument type. Fall back on default codegen.
2170 switch (N->getOpcode()) {
2172 return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
2174 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2175 Opc = FRINTPOpcs[Variant];
2179 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2180 Opc = FRINTMOpcs[Variant];
2184 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2185 Opc = FRINTZOpcs[Variant];
2189 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2190 Opc = FRINTAOpcs[Variant];
2196 SDValue In = N->getOperand(0);
2197 SmallVector<SDValue, 2> Ops;
2200 if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, Variant, dl))
2201 Ops.push_back(SDValue(FRINTXNode, 1));
2203 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2206 /// SelectFPConvertWithRound - Try to combine FP rounding and
2207 /// FP-INT conversion.
2208 SDNode *AArch64DAGToDAGISel::SelectFPConvertWithRound(SDNode *N) {
2209 SDNode *Op0 = N->getOperand(0).getNode();
2211 // Return if the round op is used by other nodes, as this would result in two
2212 // FRINTX, one each for round and convert.
2213 if (!Op0->hasOneUse())
2216 unsigned InTyVariant;
2217 EVT InTy = Op0->getValueType(0);
2218 if (InTy == MVT::f32)
2220 else if (InTy == MVT::f64)
2225 unsigned OutTyVariant;
2226 EVT OutTy = N->getValueType(0);
2227 if (OutTy == MVT::i32)
2229 else if (OutTy == MVT::i64)
2234 assert((N->getOpcode() == ISD::FP_TO_SINT
2235 || N->getOpcode() == ISD::FP_TO_UINT) && "Unexpected opcode!");
2236 unsigned FpConVariant = N->getOpcode() == ISD::FP_TO_SINT ? 0 : 1;
2239 switch (Op0->getOpcode()) {
2243 unsigned FCVTPOpcs[2][2][2] = {
2244 { { AArch64::FCVTPSUWSr, AArch64::FCVTPSUXSr },
2245 { AArch64::FCVTPSUWDr, AArch64::FCVTPSUXDr } },
2246 { { AArch64::FCVTPUUWSr, AArch64::FCVTPUUXSr },
2247 { AArch64::FCVTPUUWDr, AArch64::FCVTPUUXDr } } };
2248 Opc = FCVTPOpcs[FpConVariant][InTyVariant][OutTyVariant];
2252 unsigned FCVTMOpcs[2][2][2] = {
2253 { { AArch64::FCVTMSUWSr, AArch64::FCVTMSUXSr },
2254 { AArch64::FCVTMSUWDr, AArch64::FCVTMSUXDr } },
2255 { { AArch64::FCVTMUUWSr, AArch64::FCVTMUUXSr },
2256 { AArch64::FCVTMUUWDr, AArch64::FCVTMUUXDr } } };
2257 Opc = FCVTMOpcs[FpConVariant][InTyVariant][OutTyVariant];
2261 unsigned FCVTZOpcs[2][2][2] = {
2262 { { AArch64::FCVTZSUWSr, AArch64::FCVTZSUXSr },
2263 { AArch64::FCVTZSUWDr, AArch64::FCVTZSUXDr } },
2264 { { AArch64::FCVTZUUWSr, AArch64::FCVTZUUXSr },
2265 { AArch64::FCVTZUUWDr, AArch64::FCVTZUUXDr } } };
2266 Opc = FCVTZOpcs[FpConVariant][InTyVariant][OutTyVariant];
2270 unsigned FCVTAOpcs[2][2][2] = {
2271 { { AArch64::FCVTASUWSr, AArch64::FCVTASUXSr },
2272 { AArch64::FCVTASUWDr, AArch64::FCVTASUXDr } },
2273 { { AArch64::FCVTAUUWSr, AArch64::FCVTAUUXSr },
2274 { AArch64::FCVTAUUWDr, AArch64::FCVTAUUXDr } } };
2275 Opc = FCVTAOpcs[FpConVariant][InTyVariant][OutTyVariant];
2281 SDValue In = Op0->getOperand(0);
2282 SmallVector<SDValue, 2> Ops;
2285 if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, InTyVariant, DL))
2286 Ops.push_back(SDValue(FRINTXNode, 1));
2288 return CurDAG->getMachineNode(Opc, DL, OutTy, Ops);
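// Worked example (illustrative): (fp_to_sint (ffloor %x)) with %x : f32 and an
// i32 result rounds toward minus infinity, so it picks FCVTMSUWSr from the
// FCVTM table above; on Darwin (without unsafe-fp-math) an FRINTXSr is glued
// in first purely to raise the inexact flag.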
2292 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2293 unsigned RegWidth) {
2295 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2296 FVal = CN->getValueAPF();
2297 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2298 // Some otherwise illegal constants are allowed in this case.
2299 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2300 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2303 ConstantPoolSDNode *CN =
2304 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2305 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2309 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
2313 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
2318 // fbits is between 1 and 64 in the worst-case, which means the fmul
2319 // could have 2^64 as an actual operand. Need 65 bits of precision.
2320 APSInt IntVal(65, true);
2321 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2323 // N.b. isPowerOf2 also checks for > 0.
2324 if (!IsExact || !IntVal.isPowerOf2()) return false;
2325 unsigned FBits = IntVal.logBase2();
2327 // Checks above should have guaranteed that we haven't lost information in
2328 // finding FBits, but it must still be in range.
2329 if (FBits == 0 || FBits > RegWidth) return false;
2331 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
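  // Worked example (illustrative): for (fp_to_sint (fmul %x, 256.0)) with an
  // i32 result, the constant converts exactly to 256 == 2^8, so FBits == 8 and
  // the caller can emit a fixed-point FCVTZS with 8 fractional bits.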
// Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
// of the string, obtains the integer values from them, and combines these
// into a single value to be used in the MRS/MSR instruction.
2338 static int getIntOperandFromRegisterString(StringRef RegString) {
2339 SmallVector<StringRef, 5> Fields;
2340 RegString.split(Fields, ':');
2342 if (Fields.size() == 1)
2345 assert(Fields.size() == 5
2346 && "Invalid number of fields in read register string");
2348 SmallVector<int, 5> Ops;
2349 bool AllIntFields = true;
2351 for (StringRef Field : Fields) {
2353 AllIntFields &= !Field.getAsInteger(10, IntField);
2354 Ops.push_back(IntField);
2357 assert(AllIntFields &&
2358 "Unexpected non-integer value in special register string.");
2360 // Need to combine the integer fields of the string into a single value
  // based on the bit encoding of the MRS/MSR instruction.
2362 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2363 (Ops[3] << 3) | (Ops[4]);
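// Worked example (illustrative): the string "1:2:7:4:5" encodes as
// (1 << 14) | (2 << 11) | (7 << 7) | (4 << 3) | 5 == 21413 (0x53A5).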
2366 // Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
2369 // known by the MRS SysReg mapper.
2370 SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
2371 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2372 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2375 int Reg = getIntOperandFromRegisterString(RegString->getString());
2377 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2379 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2382 // Use the sysreg mapper to map the remaining possible strings to the
2383 // value for the register to be used for the instruction operand.
2384 AArch64SysReg::MRSMapper mapper;
2385 bool IsValidSpecialReg;
2386 Reg = mapper.fromString(RegString->getString(),
2387 Subtarget->getFeatureBits(),
2389 if (IsValidSpecialReg)
2390 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2392 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2398 // Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
2401 // known by the MSR SysReg mapper.
2402 SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
2403 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2404 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2407 int Reg = getIntOperandFromRegisterString(RegString->getString());
2409 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2410 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2411 N->getOperand(2), N->getOperand(0));
2413 // Check if the register was one of those allowed as the pstatefield value in
2414 // the MSR (immediate) instruction. To accept the values allowed in the
2415 // pstatefield for the MSR (immediate) instruction, we also require that an
  // immediate value has been provided as an argument; we know that this is
2417 // the case as it has been ensured by semantic checking.
2418 AArch64PState::PStateMapper PMapper;
2419 bool IsValidSpecialReg;
2420 Reg = PMapper.fromString(RegString->getString(),
2421 Subtarget->getFeatureBits(),
2423 if (IsValidSpecialReg) {
2424 assert (isa<ConstantSDNode>(N->getOperand(2))
2425 && "Expected a constant integer expression.");
2426 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2427 return CurDAG->getMachineNode(AArch64::MSRpstate, DL, MVT::Other,
2428 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2429 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
2433 // Use the sysreg mapper to attempt to map the remaining possible strings
2434 // to the value for the register to be used for the MSR (register)
2435 // instruction operand.
2436 AArch64SysReg::MSRMapper Mapper;
2437 Reg = Mapper.fromString(RegString->getString(),
2438 Subtarget->getFeatureBits(),
2441 if (IsValidSpecialReg)
2442 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2443 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2444 N->getOperand(2), N->getOperand(0));
2449 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2450 // Dump information about the Node being selected
2451 DEBUG(errs() << "Selecting: ");
2452 DEBUG(Node->dump(CurDAG));
2453 DEBUG(errs() << "\n");
2455 // If we have a custom node, we already have selected!
2456 if (Node->isMachineOpcode()) {
2457 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2458 Node->setNodeId(-1);
  // A few cases of custom selection.
2463 SDNode *ResNode = nullptr;
2464 EVT VT = Node->getValueType(0);
2466 switch (Node->getOpcode()) {
2470 case ISD::READ_REGISTER:
2471 if (SDNode *Res = SelectReadRegister(Node))
2475 case ISD::WRITE_REGISTER:
2476 if (SDNode *Res = SelectWriteRegister(Node))
2481 if (SDNode *I = SelectMLAV64LaneV128(Node))
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
2489 SDNode *I = SelectIndexedLoad(Node, Done);
2498 if (SDNode *I = SelectBitfieldExtractOp(Node))
2500 if (SDNode *I = SelectBitfieldInsertInZeroOp(Node))
2505 if (SDNode *I = SelectBitfieldInsertOp(Node))
2509 case ISD::EXTRACT_VECTOR_ELT: {
2510 // Extracting lane zero is a special case where we can just use a plain
2511 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // use it.
2515 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2516 // Bail and use the default Select() for non-zero lanes.
2517 if (LaneNode->getZExtValue() != 0)
2519 // If the element type is not the same as the result type, likewise
2520 // bail and use the default Select(), as there's more to do than just
2521 // a cross-class COPY. This catches extracts of i8 and i16 elements
2522 // since they will need an explicit zext.
2523 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2526 switch (Node->getOperand(0)
2528 .getVectorElementType()
2531 llvm_unreachable("Unexpected vector element type!");
2533 SubReg = AArch64::dsub;
2536 SubReg = AArch64::ssub;
2539 SubReg = AArch64::hsub;
2542 llvm_unreachable("unexpected zext-requiring extract element!");
2544 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2545 Node->getOperand(0));
2546 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2547 DEBUG(Extract->dumpr(CurDAG));
2548 DEBUG(dbgs() << "\n");
2549 return Extract.getNode();
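    // For example (illustrative): extracting lane 0 of a v2f64 as f64 becomes
    // a plain EXTRACT_SUBREG of dsub, which is at worst an FMOV and often a
    // no-op copy after register allocation.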
2551 case ISD::Constant: {
2552 // Materialize zero constants as copies from WZR/XZR. This allows
2553 // the coalescer to propagate these into other instructions.
2554 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32)
2557 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2558 AArch64::WZR, MVT::i32).getNode();
2559 else if (VT == MVT::i64)
2560 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2561 AArch64::XZR, MVT::i64).getNode();
2566 case ISD::FrameIndex: {
2567 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2568 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2569 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2570 const TargetLowering *TLI = getTargetLowering();
2571 SDValue TFI = CurDAG->getTargetFrameIndex(
2572 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
2574 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
2575 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
2576 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
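    // For example (an illustrative sketch): a frame object that ends up 16
    // bytes above SP is eventually materialized as "add x<n>, sp, #16" once
    // frame indices are resolved.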
2578 case ISD::INTRINSIC_W_CHAIN: {
2579 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2583 case Intrinsic::aarch64_ldaxp:
2584 case Intrinsic::aarch64_ldxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2587 SDValue MemAddr = Node->getOperand(2);
2589 SDValue Chain = Node->getOperand(0);
2591 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2592 MVT::Other, MemAddr, Chain);
2594 // Transfer memoperands.
2595 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2596 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2597 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2600 case Intrinsic::aarch64_stlxp:
2601 case Intrinsic::aarch64_stxp: {
      unsigned Op =
          IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2605 SDValue Chain = Node->getOperand(0);
2606 SDValue ValLo = Node->getOperand(2);
2607 SDValue ValHi = Node->getOperand(3);
2608 SDValue MemAddr = Node->getOperand(4);
2610 // Place arguments in the right order.
2611 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
2613 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2614 // Transfer memoperands.
2615 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2616 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2617 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2621 case Intrinsic::aarch64_neon_ld1x2:
2622 if (VT == MVT::v8i8)
2623 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2624 else if (VT == MVT::v16i8)
2625 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
2626 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2627 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
2628 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2629 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2630 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2631 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2632 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2633 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2634 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2635 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2636 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2637 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2639 case Intrinsic::aarch64_neon_ld1x3:
2640 if (VT == MVT::v8i8)
2641 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2642 else if (VT == MVT::v16i8)
2643 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
2644 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2645 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
2646 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2647 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2648 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2649 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2650 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2651 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2652 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2653 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2654 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2655 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2657 case Intrinsic::aarch64_neon_ld1x4:
2658 if (VT == MVT::v8i8)
2659 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2660 else if (VT == MVT::v16i8)
2661 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
2662 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2663 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
2664 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2665 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2666 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2667 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2668 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2669 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2670 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2671 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2672 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2673 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2675 case Intrinsic::aarch64_neon_ld2:
2676 if (VT == MVT::v8i8)
2677 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2678 else if (VT == MVT::v16i8)
2679 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
2680 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2681 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
2682 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2683 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2684 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2685 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2686 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2687 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2688 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2689 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2690 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2691 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2693 case Intrinsic::aarch64_neon_ld3:
2694 if (VT == MVT::v8i8)
2695 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2696 else if (VT == MVT::v16i8)
2697 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
2698 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2699 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
2700 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2701 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2702 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2703 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2704 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2705 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2706 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2707 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2708 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2709 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2711 case Intrinsic::aarch64_neon_ld4:
2712 if (VT == MVT::v8i8)
2713 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2714 else if (VT == MVT::v16i8)
2715 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
2716 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2717 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
2718 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2719 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2720 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2721 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2722 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2723 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2724 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2725 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2726 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2727 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2729 case Intrinsic::aarch64_neon_ld2r:
2730 if (VT == MVT::v8i8)
2731 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2732 else if (VT == MVT::v16i8)
2733 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
2734 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2735 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
2736 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2737 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2738 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2739 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2740 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2741 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2742 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2743 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2744 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2745 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2747 case Intrinsic::aarch64_neon_ld3r:
2748 if (VT == MVT::v8i8)
2749 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2750 else if (VT == MVT::v16i8)
2751 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
2752 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2753 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
2754 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2755 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2756 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2757 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2758 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2759 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2760 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2761 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2762 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2763 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2765 case Intrinsic::aarch64_neon_ld4r:
2766 if (VT == MVT::v8i8)
2767 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2768 else if (VT == MVT::v16i8)
2769 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
2770 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2771 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
2772 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2773 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2774 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2775 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2776 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2777 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2778 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2779 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2780 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2781 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2783 case Intrinsic::aarch64_neon_ld2lane:
2784 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2785 return SelectLoadLane(Node, 2, AArch64::LD2i8);
2786 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2788 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2789 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2791 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2792 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2794 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2796 case Intrinsic::aarch64_neon_ld3lane:
2797 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2798 return SelectLoadLane(Node, 3, AArch64::LD3i8);
2799 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2801 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2802 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2804 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2805 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2807 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2809 case Intrinsic::aarch64_neon_ld4lane:
2810 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2811 return SelectLoadLane(Node, 4, AArch64::LD4i8);
2812 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2814 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2815 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2817 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2818 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2820 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2824 case ISD::INTRINSIC_WO_CHAIN: {
2825 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
2829 case Intrinsic::aarch64_neon_tbl2:
2830 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
2831 : AArch64::TBLv16i8Two,
2833 case Intrinsic::aarch64_neon_tbl3:
2834 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
2835 : AArch64::TBLv16i8Three,
2837 case Intrinsic::aarch64_neon_tbl4:
2838 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
2839 : AArch64::TBLv16i8Four,
2841 case Intrinsic::aarch64_neon_tbx2:
2842 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
2843 : AArch64::TBXv16i8Two,
2845 case Intrinsic::aarch64_neon_tbx3:
2846 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
2847 : AArch64::TBXv16i8Three,
2849 case Intrinsic::aarch64_neon_tbx4:
2850 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
2851 : AArch64::TBXv16i8Four,
2853 case Intrinsic::aarch64_neon_smull:
2854 case Intrinsic::aarch64_neon_umull:
2855 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2861 case ISD::INTRINSIC_VOID: {
2862 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2863 if (Node->getNumOperands() >= 3)
2864 VT = Node->getOperand(2)->getValueType(0);
2868 case Intrinsic::aarch64_neon_st1x2: {
2869 if (VT == MVT::v8i8)
2870 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2871 else if (VT == MVT::v16i8)
2872 return SelectStore(Node, 2, AArch64::ST1Twov16b);
2873 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2874 return SelectStore(Node, 2, AArch64::ST1Twov4h);
2875 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2876 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2877 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2878 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2879 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2880 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2881 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2882 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2883 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2884 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2887 case Intrinsic::aarch64_neon_st1x3: {
2888 if (VT == MVT::v8i8)
2889 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2890 else if (VT == MVT::v16i8)
2891 return SelectStore(Node, 3, AArch64::ST1Threev16b);
2892 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2893 return SelectStore(Node, 3, AArch64::ST1Threev4h);
2894 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2895 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2896 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2897 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2898 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2899 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2900 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2901 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2902 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2903 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2906 case Intrinsic::aarch64_neon_st1x4: {
2907 if (VT == MVT::v8i8)
2908 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2909 else if (VT == MVT::v16i8)
2910 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
2911 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2912 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
2913 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2914 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2915 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2916 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2917 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2918 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2919 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2920 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2921 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2922 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2925 case Intrinsic::aarch64_neon_st2: {
2926 if (VT == MVT::v8i8)
2927 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2928 else if (VT == MVT::v16i8)
2929 return SelectStore(Node, 2, AArch64::ST2Twov16b);
2930 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2931 return SelectStore(Node, 2, AArch64::ST2Twov4h);
2932 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2933 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2934 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2935 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2936 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2937 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2938 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2939 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2940 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2941 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2944 case Intrinsic::aarch64_neon_st3: {
2945 if (VT == MVT::v8i8)
2946 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2947 else if (VT == MVT::v16i8)
2948 return SelectStore(Node, 3, AArch64::ST3Threev16b);
2949 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2950 return SelectStore(Node, 3, AArch64::ST3Threev4h);
2951 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2952 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2953 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2954 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2955 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2956 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2957 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2958 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2959 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2960 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2963 case Intrinsic::aarch64_neon_st4: {
2964 if (VT == MVT::v8i8)
2965 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2966 else if (VT == MVT::v16i8)
2967 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
2968 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2969 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
2970 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2971 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2972 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2973 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2974 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2975 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2976 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2977 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2978 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2979 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2982 case Intrinsic::aarch64_neon_st2lane: {
2983 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2984 return SelectStoreLane(Node, 2, AArch64::ST2i8);
2985 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2987 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2988 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2990 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2991 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2993 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2996 case Intrinsic::aarch64_neon_st3lane: {
2997 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2998 return SelectStoreLane(Node, 3, AArch64::ST3i8);
2999 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3001 return SelectStoreLane(Node, 3, AArch64::ST3i16);
3002 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3004 return SelectStoreLane(Node, 3, AArch64::ST3i32);
3005 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3007 return SelectStoreLane(Node, 3, AArch64::ST3i64);
3010 case Intrinsic::aarch64_neon_st4lane: {
3011 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3012 return SelectStoreLane(Node, 4, AArch64::ST4i8);
3013 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3015 return SelectStoreLane(Node, 4, AArch64::ST4i16);
3016 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3018 return SelectStoreLane(Node, 4, AArch64::ST4i32);
3019 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3021 return SelectStoreLane(Node, 4, AArch64::ST4i64);
3027 case AArch64ISD::LD2post: {
3028 if (VT == MVT::v8i8)
3029 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
3030 else if (VT == MVT::v16i8)
3031 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
3032 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3033 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
3034 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3035 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
3036 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3037 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
3038 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3039 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
3040 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3041 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
3042 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3043 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
3046 case AArch64ISD::LD3post: {
3047 if (VT == MVT::v8i8)
3048 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
3049 else if (VT == MVT::v16i8)
3050 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
3051 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3052 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
3053 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3054 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
3055 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3056 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
3057 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3058 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
3059 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3060 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
3061 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3062 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
3065 case AArch64ISD::LD4post: {
3066 if (VT == MVT::v8i8)
3067 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
3068 else if (VT == MVT::v16i8)
3069 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
3070 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3071 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
3072 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3073 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
3074 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3075 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
3076 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3077 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
3078 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3079 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
3080 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3081 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
3084 case AArch64ISD::LD1x2post: {
3085 if (VT == MVT::v8i8)
3086 return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
3087 else if (VT == MVT::v16i8)
3088 return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
3089 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3090 return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
3091 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3092 return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
3093 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3094 return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
3095 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3096 return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
3097 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3098 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
3099 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3100 return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
3103 case AArch64ISD::LD1x3post: {
3104 if (VT == MVT::v8i8)
3105 return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
3106 else if (VT == MVT::v16i8)
3107 return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
3108 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3109 return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
3110 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3111 return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
3112 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3113 return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
3114 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3115 return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
3116 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3117 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
3118 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3119 return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
3122 case AArch64ISD::LD1x4post: {
3123 if (VT == MVT::v8i8)
3124 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
3125 else if (VT == MVT::v16i8)
3126 return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
3127 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3128 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
3129 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3130 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
3131 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3132 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
3133 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3134 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
3135 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3136 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
3137 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3138 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
3141 case AArch64ISD::LD1DUPpost: {
3142 if (VT == MVT::v8i8)
3143 return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
3144 else if (VT == MVT::v16i8)
3145 return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
3146 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3147 return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
3148 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3149 return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
3150 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3151 return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
3152 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3153 return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
3154 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3155 return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
3156 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3157 return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
3160 case AArch64ISD::LD2DUPpost: {
3161 if (VT == MVT::v8i8)
3162 return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
3163 else if (VT == MVT::v16i8)
3164 return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
3165 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3166 return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
3167 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3168 return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
3169 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3170 return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
3171 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3172 return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
3173 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3174 return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
3175 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3176 return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
3179 case AArch64ISD::LD3DUPpost: {
3180 if (VT == MVT::v8i8)
3181 return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
3182 else if (VT == MVT::v16i8)
3183 return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
3184 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3185 return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
3186 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3187 return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
3188 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3189 return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
3190 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3191 return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
3192 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3193 return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
3194 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3195 return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
3198 case AArch64ISD::LD4DUPpost: {
3199 if (VT == MVT::v8i8)
3200 return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
3201 else if (VT == MVT::v16i8)
3202 return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
3203 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3204 return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
3205 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3206 return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
3207 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3208 return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
3209 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3210 return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
3211 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3212 return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
3213 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3214 return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
3217 case AArch64ISD::LD1LANEpost: {
3218 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3219 return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
3220 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3222 return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
3223 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3225 return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
3226 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3228 return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
    break;
  }
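  // Post-incremented interleaving stores (ST2/ST3/ST4). Unlike the loads
  // above, the value being stored is operand 1, so the vector type is
  // re-read from that operand. There is no ST2/ST3/ST4 ".1d" arrangement,
  // so the v1i64/v1f64 cases fall back to the equivalent multi-register
  // ST1 form.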
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    break;
  }
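  // Post-incremented ST1 multi-register stores (ST1 with two, three or four
  // consecutive registers); these write the registers out back to back
  // without interleaving.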
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
    break;
  }
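  // Post-incremented single-lane stores; as with the lane loads, the opcode
  // is selected purely by element size.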
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
    break;
  }

  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    if (SDNode *I = SelectFPConvertWithRound(Node))
      return I;
    break;
  }

  // Select the default instruction
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createAArch64ISelDag - This pass converts a legalized DAG into a
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}
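
// Typical use (a sketch, assuming the usual AArch64PassConfig setup in
// AArch64TargetMachine.cpp): the target's pass configuration registers this
// pass while building the instruction-selection pipeline, roughly:
//
//   bool AArch64PassConfig::addInstSelector() {
//     addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
//     ...
//     return false;
//   }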