//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//
14 #include "AArch64TargetMachine.h"
15 #include "MCTargetDesc/AArch64AddressingModes.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/CodeGen/SelectionDAGISel.h"
18 #include "llvm/IR/Function.h" // To access function attributes.
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"
30 //===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
37 AArch64TargetMachine &TM;
39 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
40 /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
        ForCodeSize(false) {}
51 const char *getPassName() const override {
52 return "AArch64 Instruction Selection";
55 bool runOnMachineFunction(MachineFunction &MF) override {
56 ForCodeSize = MF.getFunction()->optForSize();
57 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
58 return SelectionDAGISel::runOnMachineFunction(MF);
61 SDNode *Select(SDNode *Node) override;
63 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
64 /// inline asm expressions.
65 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
66 unsigned ConstraintID,
67 std::vector<SDValue> &OutOps) override;
69 SDNode *SelectMLAV64LaneV128(SDNode *N);
70 SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
71 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
72 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
73 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
74 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
75 return SelectShiftedRegister(N, false, Reg, Shift);
77 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
78 return SelectShiftedRegister(N, true, Reg, Shift);
80 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
81 return SelectAddrModeIndexed(N, 1, Base, OffImm);
83 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
84 return SelectAddrModeIndexed(N, 2, Base, OffImm);
86 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
87 return SelectAddrModeIndexed(N, 4, Base, OffImm);
89 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
90 return SelectAddrModeIndexed(N, 8, Base, OffImm);
92 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
93 return SelectAddrModeIndexed(N, 16, Base, OffImm);
95 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
96 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
98 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
99 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
101 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
102 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
104 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
105 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
107 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
108 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
113 SDValue &SignExtend, SDValue &DoShift) {
114 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
119 SDValue &SignExtend, SDValue &DoShift) {
120 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
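  /// For example (illustrative), three v16i8 values passed to createQTuple
  /// become a single REG_SEQUENCE in the QQQ register class, with the inputs
  /// placed in the qsub0..qsub2 sub-registers.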
128 SDValue createDTuple(ArrayRef<SDValue> Vecs);
129 SDValue createQTuple(ArrayRef<SDValue> Vecs);
131 /// Generic helper for the createDTuple/createQTuple
132 /// functions. Those should almost always be called instead.
133 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
134 const unsigned SubRegs[]);
136 SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
138 SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
140 SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
142 SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
144 SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
145 SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
147 SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
148 SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
149 SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
150 SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
152 SDNode *SelectBitfieldExtractOp(SDNode *N);
153 SDNode *SelectBitfieldInsertOp(SDNode *N);
155 SDNode *SelectLIBM(SDNode *N);
156 SDNode *SelectFPConvertWithRound(SDNode *N);
158 SDNode *SelectReadRegister(SDNode *N);
159 SDNode *SelectWriteRegister(SDNode *N);
161 // Include the pieces autogenerated from the target description.
162 #include "AArch64GenDAGISel.inc"
165 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
167 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
169 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
171 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
172 SDValue &Offset, SDValue &SignExtend,
174 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
175 SDValue &Offset, SDValue &SignExtend,
177 bool isWorthFolding(SDValue V) const;
178 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
179 SDValue &Offset, SDValue &SignExtend);
181 template<unsigned RegWidth>
182 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
183 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
186 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
188 SDNode *GenerateInexactFlagIfNeeded(const SDValue &In, unsigned InTyVariant,
191 } // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
195 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
196 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
197 Imm = C->getZExtValue();
// isIntImmediate - This method tests to see if the operand is a constant.
// If so, Imm will receive the value.
205 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
206 return isIntImmediate(N.getNode(), Imm);
// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
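// For example (illustrative operands), isOpcWithIntImmediate(N, ISD::SRL, Imm)
// matches the node (srl x, 7) and sets Imm to 7.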
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}
218 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
219 const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
220 switch(ConstraintID) {
222 llvm_unreachable("Unexpected asm memory constraint");
223 case InlineAsm::Constraint_i:
224 case InlineAsm::Constraint_m:
225 case InlineAsm::Constraint_Q:
226 // Require the address to be in a register. That is safe for all AArch64
227 // variants and it is hard to do anything much smarter without knowing
228 // how the operand is used.
229 OutOps.push_back(Op);
235 /// SelectArithImmed - Select an immediate value that can be represented as
236 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
237 /// Val set to the 12-bit value and Shift set to the shifter operand.
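/// For example (illustrative values): #0xabc selects as Val = 0xabc with
/// "LSL #0", #0xabc000 selects as Val = 0xabc with "LSL #12", and #0x1234
/// fits neither form and is rejected.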
238 bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
245 if (!isa<ConstantSDNode>(N.getNode()))
248 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
251 if (Immed >> 12 == 0) {
253 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
259 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
261 Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
262 Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
268 bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
275 if (!isa<ConstantSDNode>(N.getNode()))
278 // The immediate operand must be a 24-bit zero-extended immediate.
279 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
281 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
282 // have the opposite effect on the C flag, so this pattern mustn't match under
283 // those circumstances.
287 if (N.getValueType() == MVT::i32)
288 Immed = ~((uint32_t)Immed) + 1;
290 Immed = ~Immed + 1ULL;
291 if (Immed & 0xFFFFFFFFFF000000ULL)
294 Immed &= 0xFFFFFFULL;
295 return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
301 static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
302 switch (N.getOpcode()) {
304 return AArch64_AM::InvalidShiftExtend;
306 return AArch64_AM::LSL;
308 return AArch64_AM::LSR;
310 return AArch64_AM::ASR;
312 return AArch64_AM::ROR;
/// \brief Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // Folding hurts if the value is used at least twice, unless we are
  // optimizing for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}
/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// allowed.
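/// For example (illustrative): N = (shl x, 4) yields Reg = x and a Shift
/// operand encoding "LSL #4", while N = (rotr x, 8) is only accepted when
/// AllowROR is true, i.e. for the logical instructions.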
330 bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
331 SDValue &Reg, SDValue &Shift) {
332 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
333 if (ShType == AArch64_AM::InvalidShiftExtend)
335 if (!AllowROR && ShType == AArch64_AM::ROR)
338 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
339 unsigned BitSize = N.getValueType().getSizeInBits();
340 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
341 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
343 Reg = N.getOperand(0);
344 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
345 return isWorthFolding(N);
351 /// getExtendTypeForNode - Translate an extend node to the corresponding
352 /// ExtendType value.
353 static AArch64_AM::ShiftExtendType
354 getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
355 if (N.getOpcode() == ISD::SIGN_EXTEND ||
356 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
358 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
359 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
361 SrcVT = N.getOperand(0).getValueType();
363 if (!IsLoadStore && SrcVT == MVT::i8)
364 return AArch64_AM::SXTB;
365 else if (!IsLoadStore && SrcVT == MVT::i16)
366 return AArch64_AM::SXTH;
367 else if (SrcVT == MVT::i32)
368 return AArch64_AM::SXTW;
369 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
371 return AArch64_AM::InvalidShiftExtend;
372 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
373 N.getOpcode() == ISD::ANY_EXTEND) {
374 EVT SrcVT = N.getOperand(0).getValueType();
375 if (!IsLoadStore && SrcVT == MVT::i8)
376 return AArch64_AM::UXTB;
377 else if (!IsLoadStore && SrcVT == MVT::i16)
378 return AArch64_AM::UXTH;
379 else if (SrcVT == MVT::i32)
380 return AArch64_AM::UXTW;
381 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
383 return AArch64_AM::InvalidShiftExtend;
384 } else if (N.getOpcode() == ISD::AND) {
385 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
387 return AArch64_AM::InvalidShiftExtend;
388 uint64_t AndMask = CSD->getZExtValue();
392 return AArch64_AM::InvalidShiftExtend;
394 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
396 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
398 return AArch64_AM::UXTW;
402 return AArch64_AM::InvalidShiftExtend;
405 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
406 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
407 if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
408 DL->getOpcode() != AArch64ISD::DUPLANE32)
411 SDValue SV = DL->getOperand(0);
412 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
415 SDValue EV = SV.getOperand(1);
416 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
419 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
420 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
421 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
422 LaneOp = EV.getOperand(0);
// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
429 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
430 SDValue &LaneOp, int &LaneIdx) {
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}
441 /// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
442 /// is a lane in the upper half of a 128-bit vector. Recognize and select this
443 /// so that we don't emit unnecessary lane extracts.
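/// Illustratively, rather than extracting the high 64 bits of a 128-bit
/// register just to feed a 64-bit MLA, the lane index is rebased onto the
/// full 128-bit register and an indexed MLA (e.g. MLAv4i16_indexed) is
/// emitted directly.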
444 SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
446 SDValue Op0 = N->getOperand(0);
447 SDValue Op1 = N->getOperand(1);
448 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
449 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
450 int LaneIdx = -1; // Will hold the lane index.
452 if (Op1.getOpcode() != ISD::MUL ||
453 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
456 if (Op1.getOpcode() != ISD::MUL ||
457 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
462 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
464 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
466 unsigned MLAOpc = ~0U;
468 switch (N->getSimpleValueType(0).SimpleTy) {
470 llvm_unreachable("Unrecognized MLA.");
472 MLAOpc = AArch64::MLAv4i16_indexed;
475 MLAOpc = AArch64::MLAv8i16_indexed;
478 MLAOpc = AArch64::MLAv2i32_indexed;
481 MLAOpc = AArch64::MLAv4i32_indexed;
485 return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
488 SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
494 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
498 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
500 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
502 unsigned SMULLOpc = ~0U;
504 if (IntNo == Intrinsic::aarch64_neon_smull) {
505 switch (N->getSimpleValueType(0).SimpleTy) {
507 llvm_unreachable("Unrecognized SMULL.");
509 SMULLOpc = AArch64::SMULLv4i16_indexed;
512 SMULLOpc = AArch64::SMULLv2i32_indexed;
515 } else if (IntNo == Intrinsic::aarch64_neon_umull) {
516 switch (N->getSimpleValueType(0).SimpleTy) {
518 llvm_unreachable("Unrecognized SMULL.");
520 SMULLOpc = AArch64::UMULLv4i16_indexed;
523 SMULLOpc = AArch64::UMULLv2i32_indexed;
527 llvm_unreachable("Unrecognized intrinsic.");
529 return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
532 /// Instructions that accept extend modifiers like UXTW expect the register
533 /// being extended to be a GPR32, but the incoming DAG might be acting on a
534 /// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
535 /// this is the case.
536 static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
537 if (N.getValueType() == MVT::i32)
541 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
542 MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
543 dl, MVT::i32, N, SubReg);
544 return SDValue(Node, 0);
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
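/// For example (illustrative): N = (shl (and x, 0xff), 2) yields the low 32
/// bits of x as Reg and a Shift operand encoding "UXTB #2", as used in
/// "add x0, x1, w2, uxtb #2".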
550 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
552 unsigned ShiftVal = 0;
553 AArch64_AM::ShiftExtendType Ext;
555 if (N.getOpcode() == ISD::SHL) {
556 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
559 ShiftVal = CSD->getZExtValue();
563 Ext = getExtendTypeForNode(N.getOperand(0));
564 if (Ext == AArch64_AM::InvalidShiftExtend)
567 Reg = N.getOperand(0).getOperand(0);
569 Ext = getExtendTypeForNode(N);
570 if (Ext == AArch64_AM::InvalidShiftExtend)
573 Reg = N.getOperand(0);
  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
581 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
582 Reg = narrowIfNeeded(CurDAG, Reg);
583 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
585 return isWorthFolding(N);
/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point
/// in folding it into the mem op. Theoretically, it shouldn't matter, but
/// there's a single pseudo-instruction for an ADRP/ADD pair so over-aggressive
/// folding leads to duplicated ADRP instructions.
593 static bool isWorthFoldingADDlow(SDValue N) {
594 for (auto Use : N->uses()) {
595 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
596 Use->getOpcode() != ISD::ATOMIC_LOAD &&
597 Use->getOpcode() != ISD::ATOMIC_STORE)
    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
602 if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
609 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
610 /// immediate" address. The "Size" argument is the size in bytes of the memory
611 /// reference, which determines the scale.
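/// For example (illustrative), with Size == 8 the address (add x0, #32)
/// selects as Base = x0 and OffImm = 4 (the byte offset scaled by 8),
/// matching "ldr x1, [x0, #32]".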
612 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
613 SDValue &Base, SDValue &OffImm) {
615 const DataLayout &DL = CurDAG->getDataLayout();
616 const TargetLowering *TLI = getTargetLowering();
617 if (N.getOpcode() == ISD::FrameIndex) {
618 int FI = cast<FrameIndexSDNode>(N)->getIndex();
619 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
620 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
624 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
625 GlobalAddressSDNode *GAN =
626 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
627 Base = N.getOperand(0);
628 OffImm = N.getOperand(1);
632 const GlobalValue *GV = GAN->getGlobal();
633 unsigned Alignment = GV->getAlignment();
634 Type *Ty = GV->getType()->getElementType();
635 if (Alignment == 0 && Ty->isSized())
636 Alignment = DL.getABITypeAlignment(Ty);
638 if (Alignment >= Size)
642 if (CurDAG->isBaseWithConstantOffset(N)) {
643 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
644 int64_t RHSC = (int64_t)RHS->getZExtValue();
645 unsigned Scale = Log2_32(Size);
646 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
647 Base = N.getOperand(0);
648 if (Base.getOpcode() == ISD::FrameIndex) {
649 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
650 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
652 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
658 // Before falling back to our general case, check if the unscaled
659 // instructions can handle this. If so, that's preferable.
660 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
663 // Base only. The address will be materialized into a register before
664 // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
668 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
672 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
673 /// immediate" address. This should only match when there is an offset that
674 /// is not valid for a scaled immediate addressing mode. The "Size" argument
675 /// is the size in bytes of the memory reference, which is needed here to know
676 /// what is valid for a scaled immediate.
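/// For example (illustrative), with Size == 8 an offset of #-8 or #12 is not
/// valid as a scaled immediate and selects the unscaled form instead, e.g.
/// "ldur x1, [x0, #-8]".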
677 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
680 if (!CurDAG->isBaseWithConstantOffset(N))
682 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
683 int64_t RHSC = RHS->getSExtValue();
684 // If the offset is valid as a scaled immediate, don't match here.
685 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
686 RHSC < (0x1000 << Log2_32(Size)))
688 if (RHSC >= -256 && RHSC < 256) {
689 Base = N.getOperand(0);
690 if (Base.getOpcode() == ISD::FrameIndex) {
691 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
692 const TargetLowering *TLI = getTargetLowering();
693 Base = CurDAG->getTargetFrameIndex(
694 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
696 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
703 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
705 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
706 SDValue ImpDef = SDValue(
707 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
708 MachineSDNode *Node = CurDAG->getMachineNode(
709 TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
710 return SDValue(Node, 0);
/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
715 bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
716 bool WantExtend, SDValue &Offset,
717 SDValue &SignExtend) {
718 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
719 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
720 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
725 AArch64_AM::ShiftExtendType Ext =
726 getExtendTypeForNode(N.getOperand(0), true);
727 if (Ext == AArch64_AM::InvalidShiftExtend)
730 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
731 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
734 Offset = N.getOperand(0);
735 SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
738 unsigned LegalShiftVal = Log2_32(Size);
739 unsigned ShiftVal = CSD->getZExtValue();
741 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
744 if (isWorthFolding(N))
750 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
751 SDValue &Base, SDValue &Offset,
754 if (N.getOpcode() != ISD::ADD)
756 SDValue LHS = N.getOperand(0);
757 SDValue RHS = N.getOperand(1);
760 // We don't want to match immediate adds here, because they are better lowered
761 // to the register-immediate addressing modes.
762 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
765 // Check if this particular node is reused in any non-memory related
766 // operation. If yes, do not try to fold this node into the address
767 // computation, since the computation will be kept.
768 const SDNode *Node = N.getNode();
769 for (SDNode *UI : Node->uses()) {
770 if (!isa<MemSDNode>(*UI))
774 // Remember if it is worth folding N when it produces extended register.
775 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
777 // Try to match a shifted extend on the RHS.
778 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
779 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
781 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
785 // Try to match a shifted extend on the LHS.
786 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
787 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
789 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
793 // There was no shift, whatever else we find.
794 DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
796 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
797 // Try to match an unshifted extend on the LHS.
798 if (IsExtendedRegisterWorthFolding &&
799 (Ext = getExtendTypeForNode(LHS, true)) !=
800 AArch64_AM::InvalidShiftExtend) {
802 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
803 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
805 if (isWorthFolding(LHS))
809 // Try to match an unshifted extend on the RHS.
810 if (IsExtendedRegisterWorthFolding &&
811 (Ext = getExtendTypeForNode(RHS, true)) !=
812 AArch64_AM::InvalidShiftExtend) {
814 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
815 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
817 if (isWorthFolding(RHS))
// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
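// For example (illustrative values): 0xfff is encodable directly in an ADD;
// 0x111000 needs "ADD ..., LSL #12" but is not a single MOVZ, so it is
// preferred; 0x120000 is a single "MOVZ #0x12, LSL #16", so it is not.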
827 static bool isPreferredADD(int64_t ImmOff) {
828 // Constant in [0x0, 0xfff] can be encoded in ADD.
829 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
831 // Check if it can be encoded in an "ADD LSL #12".
832 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
834 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
835 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
839 bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
840 SDValue &Base, SDValue &Offset,
843 if (N.getOpcode() != ISD::ADD)
845 SDValue LHS = N.getOperand(0);
846 SDValue RHS = N.getOperand(1);
849 // Check if this particular node is reused in any non-memory related
850 // operation. If yes, do not try to fold this node into the address
851 // computation, since the computation will be kept.
852 const SDNode *Node = N.getNode();
853 for (SDNode *UI : Node->uses()) {
854 if (!isa<MemSDNode>(*UI))
  // Watch out if RHS is a wide immediate, it cannot be selected into the
  // [BaseReg+Imm] addressing mode. It may also not be encodable into an
  // ADD/SUB. Instead it will use the [BaseReg + 0] address mode and generate
  // instructions like:
  //     MOV  X0, WideImmediate
  //     ADD  X1, BaseReg, X0
  //     LDR  X2, [X1, 0]
  // In such cases, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //     MOV  X0, WideImmediate
  //     LDR  X2, [BaseReg, X0]
869 if (isa<ConstantSDNode>(RHS)) {
870 int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
871 unsigned Scale = Log2_32(Size);
    // Skip the immediate if it can be selected by the load/store addressing
    // mode. Also skip the immediate if it can be encoded by a single ADD
    // (SUB is also checked by using -ImmOff).
875 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
876 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
879 SDValue Ops[] = { RHS };
881 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
882 SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
884 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
887 // Remember if it is worth folding N when it produces extended register.
888 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
890 // Try to match a shifted extend on the RHS.
891 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
892 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
894 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
898 // Try to match a shifted extend on the LHS.
899 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
900 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
902 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
906 // Match any non-shifted, non-extend, non-immediate add expression.
909 SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
910 DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
911 // Reg1 + Reg2 is free: no check needed.
915 SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
916 static const unsigned RegClassIDs[] = {
917 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
918 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
919 AArch64::dsub2, AArch64::dsub3};
921 return createTuple(Regs, RegClassIDs, SubRegs);
924 SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
925 static const unsigned RegClassIDs[] = {
926 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
927 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
928 AArch64::qsub2, AArch64::qsub3};
930 return createTuple(Regs, RegClassIDs, SubRegs);
933 SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
934 const unsigned RegClassIDs[],
935 const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's
  // just a vector.
938 if (Regs.size() == 1)
941 assert(Regs.size() >= 2 && Regs.size() <= 4);
945 SmallVector<SDValue, 4> Ops;
947 // First operand of REG_SEQUENCE is the desired RegClass.
949 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
951 // Then we get pairs of source & subregister-position for the components.
952 for (unsigned i = 0; i < Regs.size(); ++i) {
953 Ops.push_back(Regs[i]);
954 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
958 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
959 return SDValue(N, 0);
962 SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
963 unsigned Opc, bool isExt) {
965 EVT VT = N->getValueType(0);
967 unsigned ExtOff = isExt;
969 // Form a REG_SEQUENCE to force register allocation.
970 unsigned Vec0Off = ExtOff + 1;
971 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
972 N->op_begin() + Vec0Off + NumVecs);
973 SDValue RegSeq = createQTuple(Regs);
975 SmallVector<SDValue, 6> Ops;
977 Ops.push_back(N->getOperand(1));
978 Ops.push_back(RegSeq);
979 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
980 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
983 SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
984 LoadSDNode *LD = cast<LoadSDNode>(N);
985 if (LD->isUnindexed())
987 EVT VT = LD->getMemoryVT();
988 EVT DstVT = N->getValueType(0);
989 ISD::MemIndexedMode AM = LD->getAddressingMode();
990 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
992 // We're not doing validity checking here. That was done when checking
993 // if we should mark the load as indexed or not. We're just selecting
994 // the right instruction.
997 ISD::LoadExtType ExtType = LD->getExtensionType();
998 bool InsertTo64 = false;
1000 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1001 else if (VT == MVT::i32) {
1002 if (ExtType == ISD::NON_EXTLOAD)
1003 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1004 else if (ExtType == ISD::SEXTLOAD)
1005 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1007 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
1013 } else if (VT == MVT::i16) {
1014 if (ExtType == ISD::SEXTLOAD) {
1015 if (DstVT == MVT::i64)
1016 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1018 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1020 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1021 InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
1026 } else if (VT == MVT::i8) {
1027 if (ExtType == ISD::SEXTLOAD) {
1028 if (DstVT == MVT::i64)
1029 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1031 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1033 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1034 InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
1039 } else if (VT == MVT::f16) {
1040 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1041 } else if (VT == MVT::f32) {
1042 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1043 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1044 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1045 } else if (VT.is128BitVector()) {
1046 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1049 SDValue Chain = LD->getChain();
1050 SDValue Base = LD->getBasePtr();
1051 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1052 int OffsetVal = (int)OffsetOp->getZExtValue();
1054 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
1055 SDValue Ops[] = { Base, Offset, Chain };
1056 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
1060 SDValue LoadedVal = SDValue(Res, 1);
1062 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1064 SDValue(CurDAG->getMachineNode(
1065 AArch64::SUBREG_TO_REG, dl, MVT::i64,
1066 CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
1071 ReplaceUses(SDValue(N, 0), LoadedVal);
1072 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1073 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1078 SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
1079 unsigned Opc, unsigned SubRegIdx) {
1081 EVT VT = N->getValueType(0);
1082 SDValue Chain = N->getOperand(0);
1084 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1087 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1089 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1090 SDValue SuperReg = SDValue(Ld, 0);
1091 for (unsigned i = 0; i < NumVecs; ++i)
1092 ReplaceUses(SDValue(N, i),
1093 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1095 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1099 SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1100 unsigned Opc, unsigned SubRegIdx) {
1102 EVT VT = N->getValueType(0);
1103 SDValue Chain = N->getOperand(0);
1105 SDValue Ops[] = {N->getOperand(1), // Mem operand
1106 N->getOperand(2), // Incremental
1109 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1110 MVT::Untyped, MVT::Other};
1112 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1114 // Update uses of write back register
1115 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1117 // Update uses of vector list
1118 SDValue SuperReg = SDValue(Ld, 1);
1120 ReplaceUses(SDValue(N, 0), SuperReg);
1122 for (unsigned i = 0; i < NumVecs; ++i)
1123 ReplaceUses(SDValue(N, i),
1124 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1127 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1131 SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1134 EVT VT = N->getOperand(2)->getValueType(0);
1136 // Form a REG_SEQUENCE to force register allocation.
1137 bool Is128Bit = VT.getSizeInBits() == 128;
1138 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1139 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1141 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
1142 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1147 SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1150 EVT VT = N->getOperand(2)->getValueType(0);
1151 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1152 MVT::Other}; // Type for the Chain
1154 // Form a REG_SEQUENCE to force register allocation.
1155 bool Is128Bit = VT.getSizeInBits() == 128;
1156 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1157 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1159 SDValue Ops[] = {RegSeq,
1160 N->getOperand(NumVecs + 1), // base register
1161 N->getOperand(NumVecs + 2), // Incremental
1162 N->getOperand(0)}; // Chain
1163 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1177 SDValue operator()(SDValue V64Reg) {
1178 EVT VT = V64Reg.getValueType();
1179 unsigned NarrowSize = VT.getVectorNumElements();
1180 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1181 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1185 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1186 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1191 /// NarrowVector - Given a value in the V128 register class, produce the
1192 /// equivalent value in the V64 register class.
1193 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1194 EVT VT = V128Reg.getValueType();
1195 unsigned WideSize = VT.getVectorNumElements();
1196 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1197 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1199 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1203 SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1206 EVT VT = N->getValueType(0);
1207 bool Narrow = VT.getSizeInBits() == 64;
1209 // Form a REG_SEQUENCE to force register allocation.
1210 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1213 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1214 WidenVector(*CurDAG));
1216 SDValue RegSeq = createQTuple(Regs);
1218 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1221 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1223 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1224 N->getOperand(NumVecs + 3), N->getOperand(0)};
1225 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1226 SDValue SuperReg = SDValue(Ld, 0);
1228 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1229 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1231 for (unsigned i = 0; i < NumVecs; ++i) {
1232 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1234 NV = NarrowVector(NV, *CurDAG);
1235 ReplaceUses(SDValue(N, i), NV);
1238 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1243 SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1246 EVT VT = N->getValueType(0);
1247 bool Narrow = VT.getSizeInBits() == 64;
1249 // Form a REG_SEQUENCE to force register allocation.
1250 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1253 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1254 WidenVector(*CurDAG));
1256 SDValue RegSeq = createQTuple(Regs);
1258 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1259 RegSeq->getValueType(0), MVT::Other};
1262 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1264 SDValue Ops[] = {RegSeq,
1265 CurDAG->getTargetConstant(LaneNo, dl,
1266 MVT::i64), // Lane Number
1267 N->getOperand(NumVecs + 2), // Base register
1268 N->getOperand(NumVecs + 3), // Incremental
1270 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1272 // Update uses of the write back register
1273 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1275 // Update uses of the vector list
1276 SDValue SuperReg = SDValue(Ld, 1);
1278 ReplaceUses(SDValue(N, 0),
1279 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1281 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1282 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1284 for (unsigned i = 0; i < NumVecs; ++i) {
1285 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1288 NV = NarrowVector(NV, *CurDAG);
1289 ReplaceUses(SDValue(N, i), NV);
1294 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1299 SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1302 EVT VT = N->getOperand(2)->getValueType(0);
1303 bool Narrow = VT.getSizeInBits() == 64;
1305 // Form a REG_SEQUENCE to force register allocation.
1306 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1309 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1310 WidenVector(*CurDAG));
1312 SDValue RegSeq = createQTuple(Regs);
1315 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1317 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1318 N->getOperand(NumVecs + 3), N->getOperand(0)};
1319 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1321 // Transfer memoperands.
1322 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1323 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1324 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1329 SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1332 EVT VT = N->getOperand(2)->getValueType(0);
1333 bool Narrow = VT.getSizeInBits() == 64;
1335 // Form a REG_SEQUENCE to force register allocation.
1336 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1339 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1340 WidenVector(*CurDAG));
1342 SDValue RegSeq = createQTuple(Regs);
1344 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1348 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1350 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1351 N->getOperand(NumVecs + 2), // Base Register
1352 N->getOperand(NumVecs + 3), // Incremental
1354 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1356 // Transfer memoperands.
1357 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1358 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1359 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1364 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1365 unsigned &Opc, SDValue &Opd0,
1366 unsigned &LSB, unsigned &MSB,
1367 unsigned NumberOfIgnoredLowBits,
1368 bool BiggerPattern) {
1369 assert(N->getOpcode() == ISD::AND &&
1370 "N must be a AND operation to call this function");
1372 EVT VT = N->getValueType(0);
1374 // Here we can test the type of VT and return false when the type does not
1375 // match, but since it is done prior to that call in the current context
1376 // we turned that into an assert to avoid redundant code.
1377 assert((VT == MVT::i32 || VT == MVT::i64) &&
1378 "Type checking must have been done before calling this function");
1380 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1381 // changed the AND node to a 32-bit mask operation. We'll have to
1382 // undo that as part of the transform here if we want to catch all
1383 // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching bigger patterns (bitfield insert).
1387 // For unsigned extracts, check for a shift right and mask
1388 uint64_t And_imm = 0;
1389 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1392 const SDNode *Op0 = N->getOperand(0).getNode();
1394 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1395 // simplified. Try to undo that
1396 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1398 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1399 if (And_imm & (And_imm + 1))
1402 bool ClampMSB = false;
1403 uint64_t Srl_imm = 0;
1404 // Handle the SRL + ANY_EXTEND case.
1405 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1406 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1407 // Extend the incoming operand of the SRL to 64-bit.
1408 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1409 // Make sure to clamp the MSB so that we preserve the semantics of the
1410 // original operations.
1412 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1413 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1415 // If the shift result was truncated, we can still combine them.
1416 Opd0 = Op0->getOperand(0).getOperand(0);
1418 // Use the type of SRL node.
1419 VT = Opd0->getValueType(0);
1420 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1421 Opd0 = Op0->getOperand(0);
1422 } else if (BiggerPattern) {
1423 // Let's pretend a 0 shift right has been performed.
1424 // The resulting code will be at least as good as the original one
1425 // plus it may expose more opportunities for bitfield insert pattern.
1426 // FIXME: Currently we limit this to the bigger pattern, because
1427 // some optimizations expect AND and not UBFM
1428 Opd0 = N->getOperand(0);
1432 // Bail out on large immediates. This happens when no proper
1433 // combining/constant folding was performed.
1434 if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
1436 << ": Found large shift immediate, this should not happen\n"));
1441 MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
1442 : countTrailingOnes<uint64_t>(And_imm)) -
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead
    // of the zeros which would get shifted in with the original right shift
    // operation.
1449 MSB = MSB > 31 ? 31 : MSB;
1451 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1455 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1456 SDValue &Opd0, unsigned &LSB,
  // We are looking for the following pattern, which extracts several
  // contiguous bits from the source value and places them at the LSB of the
  // destination value; all other bits of the destination value are set to
  // zero:
  //
  //   Value2 = AND Value, MaskImm
  //   SRL Value2, ShiftImm
  //
  // where MaskImm >> ShiftImm must be a mask of contiguous low bits; its
  // width gives the number of bits extracted.
  //
  // This gets selected into a single UBFM:
  //
  //   UBFM Value, ShiftImm, BitWide + ShiftImm - 1
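  //
  // For example (illustrative): (srl (and x, 0xff0), 4) becomes
  // "UBFM x, #4, #11", extracting bits 4..11 of x.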
1472 if (N->getOpcode() != ISD::SRL)
1475 uint64_t And_mask = 0;
1476 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1479 Opd0 = N->getOperand(0).getOperand(0);
1481 uint64_t Srl_imm = 0;
1482 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1485 // Check whether we really have several bits extract here.
1486 unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
1487 if (BitWide && isMask_64(And_mask >> Srl_imm)) {
1488 if (N->getValueType(0) == MVT::i32)
1489 Opc = AArch64::UBFMWri;
1491 Opc = AArch64::UBFMXri;
1494 MSB = BitWide + Srl_imm - 1;
1501 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1502 unsigned &Immr, unsigned &Imms,
1503 bool BiggerPattern) {
1504 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1505 "N must be a SHR/SRA operation to call this function");
1507 EVT VT = N->getValueType(0);
1509 // Here we can test the type of VT and return false when the type does not
1510 // match, but since it is done prior to that call in the current context
1511 // we turned that into an assert to avoid redundant code.
1512 assert((VT == MVT::i32 || VT == MVT::i64) &&
1513 "Type checking must have been done before calling this function");
1515 // Check for AND + SRL doing several bits extract.
1516 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
1519 // we're looking for a shift of a shift
1520 uint64_t Shl_imm = 0;
1521 uint64_t Trunc_bits = 0;
1522 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1523 Opd0 = N->getOperand(0).getOperand(0);
1524 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1525 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1526 // We are looking for a shift of truncate. Truncate from i64 to i32 could
1527 // be considered as setting high 32 bits as zero. Our strategy here is to
1528 // always generate 64bit UBFM. This consistency will help the CSE pass
1529 // later find more redundancy.
1530 Opd0 = N->getOperand(0).getOperand(0);
1531 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1532 VT = Opd0->getValueType(0);
1533 assert(VT == MVT::i64 && "the promoted type should be i64");
1534 } else if (BiggerPattern) {
1535 // Let's pretend a 0 shift left has been performed.
1536 // FIXME: Currently we limit this to the bigger pattern case,
1537 // because some optimizations expect AND and not UBFM
1538 Opd0 = N->getOperand(0);
  // Missing combines/constant folding may have left us with strange
  // constants.
1544 if (Shl_imm >= VT.getSizeInBits()) {
1546 << ": Found large shift immediate, this should not happen\n"));
1550 uint64_t Srl_imm = 0;
1551 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1554 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1555 "bad amount in shift node!");
1556 int immr = Srl_imm - Shl_imm;
1557 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
1558 Imms = VT.getSizeInBits() - Shl_imm - Trunc_bits - 1;
1559 // SRA requires a signed extraction
1561 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1563 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1567 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1568 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
1569 unsigned NumberOfIgnoredLowBits = 0,
1570 bool BiggerPattern = false) {
1571 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1574 switch (N->getOpcode()) {
1576 if (!N->isMachineOpcode())
1580 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
1581 NumberOfIgnoredLowBits, BiggerPattern);
1584 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
1587 unsigned NOpc = N->getMachineOpcode();
1591 case AArch64::SBFMWri:
1592 case AArch64::UBFMWri:
1593 case AArch64::SBFMXri:
1594 case AArch64::UBFMXri:
1596 Opd0 = N->getOperand(0);
1597 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1598 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1605 SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1606 unsigned Opc, Immr, Imms;
1608 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
1611 EVT VT = N->getValueType(0);
1614 // If the bit extract operation is 64bit but the original type is 32bit, we
1615 // need to add one EXTRACT_SUBREG.
1616 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
1617 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
1618 CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
1620 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
1621 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1622 MachineSDNode *Node =
1623 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
1624 SDValue(BFM, 0), SubReg);
1628 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
1629 CurDAG->getTargetConstant(Imms, dl, VT)};
1630 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
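/// For example (illustrative, i32): DstMask 0xffff00ff is complementary to
/// inserted bits that occupy 0x0000ff00.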
1637 static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1638 unsigned NumberOfIgnoredHighBits, EVT VT) {
1639 assert((VT == MVT::i32 || VT == MVT::i64) &&
1640 "i32 or i64 mask type expected!");
1641 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1643 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1644 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1646 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1647 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped, provided it was
// never used before being dropped.
// E.g., looking for the useful bits of x:
//   1. y = x & 0x7
//   2. z = y >> 2
// After #1, the useful bits of x are 0x7, and they live through y.
// After #2, the useful bits of x are 0x4.
// However, if x is also used by an unpredictable instruction (e.g., a store
// of x itself), then all of its bits are useful.
1665 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1667 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1670 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1671 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1672 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1673 getUsefulBits(Op, UsefulBits, Depth + 1);
1676 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1677 uint64_t Imm, uint64_t MSB,
1679 // inherit the bitwidth value
1680 APInt OpUsefulBits(UsefulBits);
1684 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1686 // The interesting part will be in the lower part of the result
1687 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1688 // The interesting part was starting at Imm in the argument
1689 OpUsefulBits = OpUsefulBits.shl(Imm);
1691 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1693 // The interesting part will be shifted in the result
1694 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1695 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1696 // The interesting part was at zero in the argument
1697 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1700 UsefulBits &= OpUsefulBits;
1703 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1706 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1708 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1710 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1713 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1715 uint64_t ShiftTypeAndValue =
1716 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1717 APInt Mask(UsefulBits);
1718 Mask.clearAllBits();
1721 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1723 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1724 Mask = Mask.shl(ShiftAmt);
1725 getUsefulBits(Op, Mask, Depth + 1);
1726 Mask = Mask.lshr(ShiftAmt);
1727 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1729 // We do not handle AArch64_AM::ASR, because the sign will change the
1730 // number of useful bits
1731 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1732 Mask = Mask.lshr(ShiftAmt);
1733 getUsefulBits(Op, Mask, Depth + 1);
1734 Mask = Mask.shl(ShiftAmt);
1741 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1744 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1746 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1748 if (Op.getOperand(1) == Orig)
1749 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1751 APInt OpUsefulBits(UsefulBits);
1755 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1757 UsefulBits &= ~OpUsefulBits;
1758 getUsefulBits(Op, UsefulBits, Depth + 1);
1760 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1762 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1763 getUsefulBits(Op, UsefulBits, Depth + 1);
1767 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1768 SDValue Orig, unsigned Depth) {
1770 // Users of this node should have already been instruction selected
1771 // FIXME: Can we turn that into an assert?
1772 if (!UserNode->isMachineOpcode())
1775 switch (UserNode->getMachineOpcode()) {
1778 case AArch64::ANDSWri:
1779 case AArch64::ANDSXri:
1780 case AArch64::ANDWri:
1781 case AArch64::ANDXri:
1782 // We increment Depth only when we call getUsefulBits.
1783 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1785 case AArch64::UBFMWri:
1786 case AArch64::UBFMXri:
1787 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1789 case AArch64::ORRWrs:
1790 case AArch64::ORRXrs:
1791 if (UserNode->getOperand(1) != Orig)
1793 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1795 case AArch64::BFMWri:
1796 case AArch64::BFMXri:
1797 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1801 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1804 // Initialize UsefulBits
1806 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1807 // At the beginning, assume every produced bit is useful
1808 UsefulBits = APInt(Bitwidth, 0);
1809 UsefulBits.flipAllBits();
1811 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1813 for (SDNode *Node : Op.getNode()->uses()) {
1814 // A use cannot produce useful bits
1815 APInt UsefulBitsForUse = APInt(UsefulBits);
1816 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1817 UsersUsefulBits |= UsefulBitsForUse;
1819 // UsefulBits contains the produced bits that are meaningful for the
1820 // current definition; thus a user cannot make a bit meaningful at
1821 // this point.
1822 UsefulBits &= UsersUsefulBits;
1825 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
1826 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1827 /// 0, return Op unchanged.
1828 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1832 EVT VT = Op.getValueType();
1834 unsigned BitWidth = VT.getSizeInBits();
1835 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1838 if (ShlAmount > 0) {
1839 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
1840 ShiftNode = CurDAG->getMachineNode(
1841 UBFMOpc, dl, VT, Op,
1842 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
1843 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
1845 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1846 assert(ShlAmount < 0 && "expected right shift");
1847 int ShrAmount = -ShlAmount;
1848 ShiftNode = CurDAG->getMachineNode(
1849 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
1850 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
1853 return SDValue(ShiftNode, 0);
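// Concretely (illustrative), for an i32 operand:
//   getLeftShift(CurDAG, Op,  4)  emits UBFMWri Op, 28, 27   ; LSL #4
//   getLeftShift(CurDAG, Op, -4)  emits UBFMWri Op,  4, 31   ; LSR #4
//   getLeftShift(CurDAG, Op,  0)  returns Op unchanged.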
1856 /// Does this tree qualify as an attempt to move a bitfield into position,
1857 /// essentially "(and (shl VAL, N), Mask)".
1858 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1859 SDValue &Src, int &ShiftAmount,
1861 EVT VT = Op.getValueType();
1862 unsigned BitWidth = VT.getSizeInBits();
1864 assert(BitWidth == 32 || BitWidth == 64);
1866 APInt KnownZero, KnownOne;
1867 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1869 // Non-zero in the sense that they're not provably zero, which is the key
1870 // point if we want to use this value
1871 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1873 // Discard a constant AND mask if present. It's safe because the node will
1874 // already have been factored into the computeKnownBits calculation above.
1876 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1877 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1878 Op = Op.getOperand(0);
1882 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1884 Op = Op.getOperand(0);
1886 if (!isShiftedMask_64(NonZeroBits))
1889 ShiftAmount = countTrailingZeros(NonZeroBits);
1890 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
1892 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1893 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1894 // node.
1895 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
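// Worked example (illustrative, assuming nothing more is known about %x):
//   Op = (and (shl %x, 3), 0xf8)
// gives NonZeroBits = 0xf8, a shifted mask, so ShiftAmount = 3 and
// MaskWidth = 5; since the SHL amount already equals ShiftAmount, Src is
// simply %x with no extra UBFM inserted.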
1900 // Given an OR operation, check if we have the following pattern:
1901 // ubfm c, b, imm, imm2 (or something that does the same job, see
1902 //                       isBitfieldExtractOp)
1903 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1904 //                 countTrailingZeros(mask2) == imm2 - imm + 1
1905 // f = d | c
1906 // If yes, the given reference arguments will be updated so that one can
1907 // replace the OR instruction with:
1908 // f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
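// Worked example (illustrative, i32 values assumed):
//   %c = UBFMWri %b, 4, 7                    ; bits [7:4] of %b, now in [3:0]
//   %d = ANDWri  %e, <encoding of 0xfffffff0>
//   %f = ORRWrs  %d, %c, 0
// can be replaced by the single bitfield move
//   %f = BFMWri  %e, %b, 4, 7                ; a.k.a. BFXIL %e, %b, #4, #4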
1909 static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1910 SDValue &Src, unsigned &ImmR,
1911 unsigned &ImmS, SelectionDAG *CurDAG) {
1912 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
1915 EVT VT = N->getValueType(0);
1917 Opc = AArch64::BFMWri;
1918 else if (VT == MVT::i64)
1919 Opc = AArch64::BFMXri;
1923 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1924 // have the expected shape. Try to undo that.
1926 getUsefulBits(SDValue(N, 0), UsefulBits);
1928 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1929 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1931 // OR is commutative; check both possibilities (does llvm provide a
1932 // way to do that directly, e.g., via a code matcher?)
1933 SDValue OrOpd1Val = N->getOperand(1);
1934 SDNode *OrOpd0 = N->getOperand(0).getNode();
1935 SDNode *OrOpd1 = N->getOperand(1).getNode();
1936 for (int i = 0; i < 2;
1937 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
1940 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
1941 NumberOfIgnoredLowBits, true)) {
1942 // Check that the returned opcode is compatible with the pattern,
1943 // i.e., same type and zero extended (U and not S)
1944 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
1945 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
1948 // Compute the width of the bitfield insertion
1950 Width = ImmS - ImmR + 1;
1951 // FIXME: This constraint is to catch bitfield insertion; we may want to
1952 // widen the pattern if we want to grab general bitfield move cases.
1957 // If the mask on the insertee is correct, we have a BFXIL operation. We
1958 // can share the ImmR and ImmS values from the already-computed UBFM.
1959 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
1961 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
1966 // Check the second part of the pattern
1967 EVT VT = OrOpd1->getValueType(0);
1968 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
1970 // Compute the Known Zero for the candidate of the first operand.
1971 // This allows us to catch more general cases than just looking for an
1972 // AND with an immediate. Indeed, simplify-demanded-bits may have removed
1973 // the AND instruction because it proved to be useless.
1974 APInt KnownZero, KnownOne;
1975 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
1977 // Check if there is enough room for the second operand to appear
1978 // in the first one.
1979 APInt BitsToBeInserted =
1980 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
1982 if ((BitsToBeInserted & ~KnownZero) != 0)
1985 // Set the first operand
1987 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
1988 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
1989 // In that case, we can eliminate the AND
1990 Dst = OrOpd1->getOperand(0);
1992 // Maybe the AND has been removed by simplify-demanded-bits
1993 // or is useful because it discards more bits
2003 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
2004 if (N->getOpcode() != ISD::OR)
2011 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
2014 EVT VT = N->getValueType(0);
2016 SDValue Ops[] = { Opd0,
2018 CurDAG->getTargetConstant(LSB, dl, VT),
2019 CurDAG->getTargetConstant(MSB, dl, VT) };
2020 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2023 /// GenerateInexactFlagIfNeeded - Insert an FRINTX instruction to generate an
2024 /// inexact signal on round-to-integer operations if needed. C11 leaves it
2025 /// implementation-defined whether these operations trigger an inexact
2026 /// exception. IEEE says they don't. Unfortunately, Darwin decided they do, so
2027 /// we sometimes have to insert a special instruction just to set the right bit in the FPSR.
2029 SDNode *AArch64DAGToDAGISel::GenerateInexactFlagIfNeeded(const SDValue &In,
2030 unsigned InTyVariant,
2032 if (Subtarget->isTargetDarwin() && !TM.Options.UnsafeFPMath) {
2033 // Pick the right FRINTX using InTyVariant needed to set the flags.
2034 // InTyVariant is 0 for 32-bit and 1 for 64-bit.
2035 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
2036 return CurDAG->getMachineNode(FRINTXOpcs[InTyVariant], DL,
2037 In.getValueType(), MVT::Glue, In);
2042 SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
2043 EVT VT = N->getValueType(0);
2047 if (VT == MVT::f32) {
2049 } else if (VT == MVT::f64) {
2052 return nullptr; // Unrecognized argument type. Fall back on default codegen.
2054 switch (N->getOpcode()) {
2056 return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
2058 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2059 Opc = FRINTPOpcs[Variant];
2063 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2064 Opc = FRINTMOpcs[Variant];
2068 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2069 Opc = FRINTZOpcs[Variant];
2073 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2074 Opc = FRINTAOpcs[Variant];
2080 SDValue In = N->getOperand(0);
2081 SmallVector<SDValue, 2> Ops;
2084 if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, Variant, dl))
2085 Ops.push_back(SDValue(FRINTXNode, 1));
2087 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
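// For example (illustrative): ISD::FCEIL on an f64 value selects to FRINTPDr;
// on Darwin (and without unsafe-fp-math) an FRINTXDr is generated first and
// glued in, solely so that the inexact flag is raised.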
2090 /// SelectFPConvertWithRound - Try to combine FP rounding and
2091 /// FP-INT conversion.
2092 SDNode *AArch64DAGToDAGISel::SelectFPConvertWithRound(SDNode *N) {
2093 SDNode *Op0 = N->getOperand(0).getNode();
2095 // Return if the round op is used by other nodes, as this would result in two
2096 // FRINTX, one each for round and convert.
2097 if (!Op0->hasOneUse())
2100 unsigned InTyVariant;
2101 EVT InTy = Op0->getValueType(0);
2102 if (InTy == MVT::f32)
2104 else if (InTy == MVT::f64)
2109 unsigned OutTyVariant;
2110 EVT OutTy = N->getValueType(0);
2111 if (OutTy == MVT::i32)
2113 else if (OutTy == MVT::i64)
2118 assert((N->getOpcode() == ISD::FP_TO_SINT
2119 || N->getOpcode() == ISD::FP_TO_UINT) && "Unexpected opcode!");
2120 unsigned FpConVariant = N->getOpcode() == ISD::FP_TO_SINT ? 0 : 1;
2123 switch (Op0->getOpcode()) {
2127 unsigned FCVTPOpcs[2][2][2] = {
2128 { { AArch64::FCVTPSUWSr, AArch64::FCVTPSUXSr },
2129 { AArch64::FCVTPSUWDr, AArch64::FCVTPSUXDr } },
2130 { { AArch64::FCVTPUUWSr, AArch64::FCVTPUUXSr },
2131 { AArch64::FCVTPUUWDr, AArch64::FCVTPUUXDr } } };
2132 Opc = FCVTPOpcs[FpConVariant][InTyVariant][OutTyVariant];
2136 unsigned FCVTMOpcs[2][2][2] = {
2137 { { AArch64::FCVTMSUWSr, AArch64::FCVTMSUXSr },
2138 { AArch64::FCVTMSUWDr, AArch64::FCVTMSUXDr } },
2139 { { AArch64::FCVTMUUWSr, AArch64::FCVTMUUXSr },
2140 { AArch64::FCVTMUUWDr, AArch64::FCVTMUUXDr } } };
2141 Opc = FCVTMOpcs[FpConVariant][InTyVariant][OutTyVariant];
2145 unsigned FCVTZOpcs[2][2][2] = {
2146 { { AArch64::FCVTZSUWSr, AArch64::FCVTZSUXSr },
2147 { AArch64::FCVTZSUWDr, AArch64::FCVTZSUXDr } },
2148 { { AArch64::FCVTZUUWSr, AArch64::FCVTZUUXSr },
2149 { AArch64::FCVTZUUWDr, AArch64::FCVTZUUXDr } } };
2150 Opc = FCVTZOpcs[FpConVariant][InTyVariant][OutTyVariant];
2154 unsigned FCVTAOpcs[2][2][2] = {
2155 { { AArch64::FCVTASUWSr, AArch64::FCVTASUXSr },
2156 { AArch64::FCVTASUWDr, AArch64::FCVTASUXDr } },
2157 { { AArch64::FCVTAUUWSr, AArch64::FCVTAUUXSr },
2158 { AArch64::FCVTAUUWDr, AArch64::FCVTAUUXDr } } };
2159 Opc = FCVTAOpcs[FpConVariant][InTyVariant][OutTyVariant];
2165 SDValue In = Op0->getOperand(0);
2166 SmallVector<SDValue, 2> Ops;
2169 if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, InTyVariant, DL))
2170 Ops.push_back(SDValue(FRINTXNode, 1));
2172 return CurDAG->getMachineNode(Opc, DL, OutTy, Ops);
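// For example (illustrative): (fp_to_sint (ffloor %x:f32)) : i32 folds into a
// single FCVTMSUWSr %x (round toward minus infinity and convert), again with
// the Darwin-only FRINTX glued in front when the inexact flag must be raised.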
2176 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2177 unsigned RegWidth) {
2179 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2180 FVal = CN->getValueAPF();
2181 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2182 // Some otherwise illegal constants are allowed in this case.
2183 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2184 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2187 ConstantPoolSDNode *CN =
2188 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2189 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2193 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2194 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2195 // x-register.
2197 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2198 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2199 // integers.
2202 // fbits is between 1 and 64 in the worst-case, which means the fmul
2203 // could have 2^64 as an actual operand. Need 65 bits of precision.
2204 APSInt IntVal(65, true);
2205 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2207 // N.b. isPowerOf2 also checks for > 0.
2208 if (!IsExact || !IntVal.isPowerOf2()) return false;
2209 unsigned FBits = IntVal.logBase2();
2211 // Checks above should have guaranteed that we haven't lost information in
2212 // finding FBits, but it must still be in range.
2213 if (FBits == 0 || FBits > RegWidth) return false;
2215 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
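// Worked example (illustrative): for (fp_to_sint (fmul %x, 65536.0)) with a
// 32-bit destination, FVal converts exactly to the integer 2^16, so FBits = 16
// is within RegWidth and FixedPos becomes the #16 operand of a fixed-point
// conversion such as FCVTZS Wd, Sn, #16.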
2219 // Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
2220 // of the string, obtains the integer values from them, and combines these
2221 // into a single value to be used in the MRS/MSR instruction.
2222 static int getIntOperandFromRegisterString(StringRef RegString) {
2223 SmallVector<StringRef, 5> Fields;
2224 RegString.split(Fields, ":");
2226 if (Fields.size() == 1)
2229 assert(Fields.size() == 5
2230 && "Invalid number of fields in read register string");
2232 SmallVector<int, 5> Ops;
2233 bool AllIntFields = true;
2235 for (StringRef Field : Fields) {
2237 AllIntFields &= !Field.getAsInteger(10, IntField);
2238 Ops.push_back(IntField);
2241 assert(AllIntFields &&
2242 "Unexpected non-integer value in special register string.");
2244 // Need to combine the integer fields of the string into a single value
2245 // based on the bit encoding of the MRS/MSR instruction.
2246 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2247 (Ops[3] << 3) | (Ops[4]);
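// A minimal sketch of the packing above (illustrative only; encodeSysReg is
// not part of this file):
//   static int encodeSysReg(int Op0, int Op1, int CRn, int CRm, int Op2) {
//     return (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2;
//   }
//   // e.g. the string "3:3:13:0:2" (TPIDR_EL0) packs to 0xde82.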
2250 // Lower the read_register intrinsic to an MRS instruction node if the special
2251 // register string argument is either of the form detailed in the ACLE (the
2252 // form described in getIntOperandFromRegisterString) or is a named register
2253 // known by the MRS SysReg mapper.
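// For example (illustrative IR, not from a test case):
//   %tp = call i64 @llvm.read_register.i64(metadata !0)
//   !0 = !{!"tpidr_el0"}
// does not match the colon-separated form, so it goes through the MRS SysReg
// mapper and selects to an MRS of TPIDR_EL0.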
2254 SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
2255 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2256 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2259 int Reg = getIntOperandFromRegisterString(RegString->getString());
2261 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2263 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2266 // Use the sysreg mapper to map the remaining possible strings to the
2267 // value for the register to be used for the instruction operand.
2268 AArch64SysReg::MRSMapper mapper;
2269 bool IsValidSpecialReg;
2270 Reg = mapper.fromString(RegString->getString(),
2271 Subtarget->getFeatureBits(),
2273 if (IsValidSpecialReg)
2274 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2276 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2282 // Lower the write_register intrinsic to an MSR instruction node if the special
2283 // register string argument is either of the form detailed in the ACLE (the
2284 // form described in getIntOperandFromRegisterString) or is a named register
2285 // known by the MSR SysReg mapper.
2286 SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
2287 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2288 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2291 int Reg = getIntOperandFromRegisterString(RegString->getString());
2293 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2294 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2295 N->getOperand(2), N->getOperand(0));
2297 // Check if the register was one of those allowed as the pstatefield value in
2298 // the MSR (immediate) instruction. To accept the values allowed in the
2299 // pstatefield for the MSR (immediate) instruction, we also require that an
2300 // immediate value has been provided as an argument; we know that this is
2301 // the case, as it has been ensured by semantic checking.
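// For example (illustrative): a write_register of the constant 1 to the
// pstatefield "spsel" selects to MSRpstate with the SPSel encoding and the
// immediate operand #1, rather than to a general MSR (register) form.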
2302 AArch64PState::PStateMapper PMapper;
2303 bool IsValidSpecialReg;
2304 Reg = PMapper.fromString(RegString->getString(),
2305 Subtarget->getFeatureBits(),
2307 if (IsValidSpecialReg) {
2308 assert (isa<ConstantSDNode>(N->getOperand(2))
2309 && "Expected a constant integer expression.");
2310 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2311 return CurDAG->getMachineNode(AArch64::MSRpstate, DL, MVT::Other,
2312 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2313 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
2317 // Use the sysreg mapper to attempt to map the remaining possible strings
2318 // to the value for the register to be used for the MSR (register)
2319 // instruction operand.
2320 AArch64SysReg::MSRMapper Mapper;
2321 Reg = Mapper.fromString(RegString->getString(),
2322 Subtarget->getFeatureBits(),
2325 if (IsValidSpecialReg)
2326 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2327 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2328 N->getOperand(2), N->getOperand(0));
2333 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2334 // Dump information about the Node being selected
2335 DEBUG(errs() << "Selecting: ");
2336 DEBUG(Node->dump(CurDAG));
2337 DEBUG(errs() << "\n");
2339 // If we have a custom node, we already have selected!
2340 if (Node->isMachineOpcode()) {
2341 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2342 Node->setNodeId(-1);
2346 // A few custom selection cases.
2347 SDNode *ResNode = nullptr;
2348 EVT VT = Node->getValueType(0);
2350 switch (Node->getOpcode()) {
2354 case ISD::READ_REGISTER:
2355 if (SDNode *Res = SelectReadRegister(Node))
2359 case ISD::WRITE_REGISTER:
2360 if (SDNode *Res = SelectWriteRegister(Node))
2365 if (SDNode *I = SelectMLAV64LaneV128(Node))
2370 // Try to select as an indexed load. Fall through to normal processing
2371 // if we can't.
2373 SDNode *I = SelectIndexedLoad(Node, Done);
2382 if (SDNode *I = SelectBitfieldExtractOp(Node))
2387 if (SDNode *I = SelectBitfieldInsertOp(Node))
2391 case ISD::EXTRACT_VECTOR_ELT: {
2392 // Extracting lane zero is a special case where we can just use a plain
2393 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2394 // the rest of the compiler, especially the register allocator and copy
2395 // propagation, to reason about, so is preferred when it's possible to
2396 // use it.
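// For example (illustrative): extracting element 0 of a v2f64 becomes an
// EXTRACT_SUBREG of dsub, i.e. just the d-register view of the q-register,
// which usually folds away entirely or becomes a single FMOV.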
2397 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2398 // Bail and use the default Select() for non-zero lanes.
2399 if (LaneNode->getZExtValue() != 0)
2401 // If the element type is not the same as the result type, likewise
2402 // bail and use the default Select(), as there's more to do than just
2403 // a cross-class COPY. This catches extracts of i8 and i16 elements
2404 // since they will need an explicit zext.
2405 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2408 switch (Node->getOperand(0)
2410 .getVectorElementType()
2413 llvm_unreachable("Unexpected vector element type!");
2415 SubReg = AArch64::dsub;
2418 SubReg = AArch64::ssub;
2421 SubReg = AArch64::hsub;
2424 llvm_unreachable("unexpected zext-requiring extract element!");
2426 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2427 Node->getOperand(0));
2428 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2429 DEBUG(Extract->dumpr(CurDAG));
2430 DEBUG(dbgs() << "\n");
2431 return Extract.getNode();
2433 case ISD::Constant: {
2434 // Materialize zero constants as copies from WZR/XZR. This allows
2435 // the coalescer to propagate these into other instructions.
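// For example (illustrative): an i64 zero feeding a call argument is selected
// as a CopyFromReg of XZR rather than a MOVZ, and the coalescer can then fold
// that copy into the use.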
2436 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2437 if (ConstNode->isNullValue()) {
2439 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2440 AArch64::WZR, MVT::i32).getNode();
2441 else if (VT == MVT::i64)
2442 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2443 AArch64::XZR, MVT::i64).getNode();
2448 case ISD::FrameIndex: {
2449 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2450 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2451 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2452 const TargetLowering *TLI = getTargetLowering();
2453 SDValue TFI = CurDAG->getTargetFrameIndex(
2454 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
2456 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
2457 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
2458 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
2460 case ISD::INTRINSIC_W_CHAIN: {
2461 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2465 case Intrinsic::aarch64_ldaxp:
2466 case Intrinsic::aarch64_ldxp: {
2468 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2469 SDValue MemAddr = Node->getOperand(2);
2471 SDValue Chain = Node->getOperand(0);
2473 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2474 MVT::Other, MemAddr, Chain);
2476 // Transfer memoperands.
2477 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2478 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2479 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2482 case Intrinsic::aarch64_stlxp:
2483 case Intrinsic::aarch64_stxp: {
2485 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2487 SDValue Chain = Node->getOperand(0);
2488 SDValue ValLo = Node->getOperand(2);
2489 SDValue ValHi = Node->getOperand(3);
2490 SDValue MemAddr = Node->getOperand(4);
2492 // Place arguments in the right order.
2493 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
2495 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2496 // Transfer memoperands.
2497 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2498 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2499 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2503 case Intrinsic::aarch64_neon_ld1x2:
2504 if (VT == MVT::v8i8)
2505 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2506 else if (VT == MVT::v16i8)
2507 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
2508 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2509 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
2510 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2511 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2512 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2513 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2514 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2515 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2516 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2517 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2518 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2519 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2521 case Intrinsic::aarch64_neon_ld1x3:
2522 if (VT == MVT::v8i8)
2523 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2524 else if (VT == MVT::v16i8)
2525 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
2526 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2527 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
2528 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2529 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2530 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2531 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2532 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2533 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2534 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2535 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2536 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2537 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2539 case Intrinsic::aarch64_neon_ld1x4:
2540 if (VT == MVT::v8i8)
2541 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2542 else if (VT == MVT::v16i8)
2543 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
2544 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2545 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
2546 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2547 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2548 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2549 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2550 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2551 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2552 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2553 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2554 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2555 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2557 case Intrinsic::aarch64_neon_ld2:
2558 if (VT == MVT::v8i8)
2559 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2560 else if (VT == MVT::v16i8)
2561 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
2562 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2563 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
2564 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2565 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2566 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2567 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2568 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2569 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2570 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2571 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2572 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2573 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2575 case Intrinsic::aarch64_neon_ld3:
2576 if (VT == MVT::v8i8)
2577 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2578 else if (VT == MVT::v16i8)
2579 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
2580 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2581 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
2582 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2583 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2584 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2585 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2586 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2587 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2588 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2589 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2590 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2591 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2593 case Intrinsic::aarch64_neon_ld4:
2594 if (VT == MVT::v8i8)
2595 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2596 else if (VT == MVT::v16i8)
2597 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
2598 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2599 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
2600 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2601 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2602 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2603 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2604 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2605 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2606 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2607 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2608 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2609 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2611 case Intrinsic::aarch64_neon_ld2r:
2612 if (VT == MVT::v8i8)
2613 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2614 else if (VT == MVT::v16i8)
2615 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
2616 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2617 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
2618 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2619 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2620 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2621 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2622 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2623 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2624 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2625 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2626 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2627 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2629 case Intrinsic::aarch64_neon_ld3r:
2630 if (VT == MVT::v8i8)
2631 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2632 else if (VT == MVT::v16i8)
2633 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
2634 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2635 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
2636 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2637 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2638 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2639 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2640 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2641 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2642 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2643 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2644 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2645 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2647 case Intrinsic::aarch64_neon_ld4r:
2648 if (VT == MVT::v8i8)
2649 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2650 else if (VT == MVT::v16i8)
2651 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
2652 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2653 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
2654 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2655 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2656 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2657 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2658 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2659 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2660 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2661 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2662 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2663 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2665 case Intrinsic::aarch64_neon_ld2lane:
2666 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2667 return SelectLoadLane(Node, 2, AArch64::LD2i8);
2668 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2670 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2671 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2673 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2674 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2676 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2678 case Intrinsic::aarch64_neon_ld3lane:
2679 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2680 return SelectLoadLane(Node, 3, AArch64::LD3i8);
2681 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2683 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2684 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2686 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2687 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2689 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2691 case Intrinsic::aarch64_neon_ld4lane:
2692 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2693 return SelectLoadLane(Node, 4, AArch64::LD4i8);
2694 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2696 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2697 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2699 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2700 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2702 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2706 case ISD::INTRINSIC_WO_CHAIN: {
2707 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
2711 case Intrinsic::aarch64_neon_tbl2:
2712 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
2713 : AArch64::TBLv16i8Two,
2715 case Intrinsic::aarch64_neon_tbl3:
2716 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
2717 : AArch64::TBLv16i8Three,
2719 case Intrinsic::aarch64_neon_tbl4:
2720 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
2721 : AArch64::TBLv16i8Four,
2723 case Intrinsic::aarch64_neon_tbx2:
2724 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
2725 : AArch64::TBXv16i8Two,
2727 case Intrinsic::aarch64_neon_tbx3:
2728 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
2729 : AArch64::TBXv16i8Three,
2731 case Intrinsic::aarch64_neon_tbx4:
2732 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
2733 : AArch64::TBXv16i8Four,
2735 case Intrinsic::aarch64_neon_smull:
2736 case Intrinsic::aarch64_neon_umull:
2737 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2743 case ISD::INTRINSIC_VOID: {
2744 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2745 if (Node->getNumOperands() >= 3)
2746 VT = Node->getOperand(2)->getValueType(0);
2750 case Intrinsic::aarch64_neon_st1x2: {
2751 if (VT == MVT::v8i8)
2752 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2753 else if (VT == MVT::v16i8)
2754 return SelectStore(Node, 2, AArch64::ST1Twov16b);
2755 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2756 return SelectStore(Node, 2, AArch64::ST1Twov4h);
2757 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2758 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2759 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2760 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2761 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2762 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2763 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2764 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2765 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2766 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2769 case Intrinsic::aarch64_neon_st1x3: {
2770 if (VT == MVT::v8i8)
2771 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2772 else if (VT == MVT::v16i8)
2773 return SelectStore(Node, 3, AArch64::ST1Threev16b);
2774 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2775 return SelectStore(Node, 3, AArch64::ST1Threev4h);
2776 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2777 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2778 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2779 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2780 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2781 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2782 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2783 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2784 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2785 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2788 case Intrinsic::aarch64_neon_st1x4: {
2789 if (VT == MVT::v8i8)
2790 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2791 else if (VT == MVT::v16i8)
2792 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
2793 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2794 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
2795 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2796 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2797 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2798 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2799 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2800 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2801 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2802 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2803 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2804 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2807 case Intrinsic::aarch64_neon_st2: {
2808 if (VT == MVT::v8i8)
2809 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2810 else if (VT == MVT::v16i8)
2811 return SelectStore(Node, 2, AArch64::ST2Twov16b);
2812 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2813 return SelectStore(Node, 2, AArch64::ST2Twov4h);
2814 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2815 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2816 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2817 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2818 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2819 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2820 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2821 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2822 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2823 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2826 case Intrinsic::aarch64_neon_st3: {
2827 if (VT == MVT::v8i8)
2828 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2829 else if (VT == MVT::v16i8)
2830 return SelectStore(Node, 3, AArch64::ST3Threev16b);
2831 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2832 return SelectStore(Node, 3, AArch64::ST3Threev4h);
2833 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2834 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2835 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2836 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2837 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2838 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2839 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2840 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2841 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2842 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2845 case Intrinsic::aarch64_neon_st4: {
2846 if (VT == MVT::v8i8)
2847 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2848 else if (VT == MVT::v16i8)
2849 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
2850 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2851 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
2852 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2853 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2854 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2855 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2856 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2857 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2858 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2859 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2860 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2861 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2864 case Intrinsic::aarch64_neon_st2lane: {
2865 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2866 return SelectStoreLane(Node, 2, AArch64::ST2i8);
2867 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2869 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2870 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2872 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2873 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2875 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2878 case Intrinsic::aarch64_neon_st3lane: {
2879 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2880 return SelectStoreLane(Node, 3, AArch64::ST3i8);
2881 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2883 return SelectStoreLane(Node, 3, AArch64::ST3i16);
2884 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2886 return SelectStoreLane(Node, 3, AArch64::ST3i32);
2887 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2889 return SelectStoreLane(Node, 3, AArch64::ST3i64);
2892 case Intrinsic::aarch64_neon_st4lane: {
2893 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2894 return SelectStoreLane(Node, 4, AArch64::ST4i8);
2895 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2897 return SelectStoreLane(Node, 4, AArch64::ST4i16);
2898 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2900 return SelectStoreLane(Node, 4, AArch64::ST4i32);
2901 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2903 return SelectStoreLane(Node, 4, AArch64::ST4i64);
2908 case AArch64ISD::LD2post: {
2909 if (VT == MVT::v8i8)
2910 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
2911 else if (VT == MVT::v16i8)
2912 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
2913 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2914 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
2915 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2916 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
2917 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2918 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
2919 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2920 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
2921 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2922 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2923 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2924 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
2927 case AArch64ISD::LD3post: {
2928 if (VT == MVT::v8i8)
2929 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
2930 else if (VT == MVT::v16i8)
2931 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
2932 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2933 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
2934 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2935 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
2936 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2937 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
2938 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2939 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
2940 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2941 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2942 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2943 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
2946 case AArch64ISD::LD4post: {
2947 if (VT == MVT::v8i8)
2948 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
2949 else if (VT == MVT::v16i8)
2950 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
2951 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2952 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
2953 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2954 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
2955 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2956 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
2957 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2958 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
2959 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2960 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2961 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2962 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
2965 case AArch64ISD::LD1x2post: {
2966 if (VT == MVT::v8i8)
2967 return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
2968 else if (VT == MVT::v16i8)
2969 return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
2970 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2971 return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
2972 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2973 return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
2974 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2975 return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
2976 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2977 return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
2978 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2979 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2980 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2981 return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
2984 case AArch64ISD::LD1x3post: {
2985 if (VT == MVT::v8i8)
2986 return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
2987 else if (VT == MVT::v16i8)
2988 return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
2989 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2990 return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
2991 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2992 return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
2993 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2994 return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
2995 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2996 return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
2997 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2998 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2999 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3000 return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
3003 case AArch64ISD::LD1x4post: {
3004 if (VT == MVT::v8i8)
3005 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
3006 else if (VT == MVT::v16i8)
3007 return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
3008 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3009 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
3010 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3011 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
3012 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3013 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
3014 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3015 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
3016 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3017 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
3018 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3019 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
3022 case AArch64ISD::LD1DUPpost: {
3023 if (VT == MVT::v8i8)
3024 return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
3025 else if (VT == MVT::v16i8)
3026 return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
3027 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3028 return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
3029 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3030 return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
3031 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3032 return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
3033 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3034 return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
3035 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3036 return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
3037 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3038 return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
3041 case AArch64ISD::LD2DUPpost: {
3042 if (VT == MVT::v8i8)
3043 return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
3044 else if (VT == MVT::v16i8)
3045 return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
3046 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3047 return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
3048 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3049 return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
3050 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3051 return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
3052 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3053 return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
3054 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3055 return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
3056 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3057 return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
3060 case AArch64ISD::LD3DUPpost: {
3061 if (VT == MVT::v8i8)
3062 return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
3063 else if (VT == MVT::v16i8)
3064 return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
3065 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3066 return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
3067 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3068 return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
3069 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3070 return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
3071 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3072 return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
3073 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3074 return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
3075 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3076 return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
3079 case AArch64ISD::LD4DUPpost: {
3080 if (VT == MVT::v8i8)
3081 return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
3082 else if (VT == MVT::v16i8)
3083 return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
3084 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3085 return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
3086 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3087 return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
3088 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3089 return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
3090 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3091 return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
3092 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3093 return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
3094 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3095 return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
3098 case AArch64ISD::LD1LANEpost: {
3099 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3100 return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
3101 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3103 return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
3104 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3106 return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
3107 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3109 return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
3112 case AArch64ISD::LD2LANEpost: {
3113 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3114 return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
3115 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3117 return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
3118 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3120 return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
3121 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3123 return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
3126 case AArch64ISD::LD3LANEpost: {
3127 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3128 return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
3129 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3131 return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
3132 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3134 return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
3135 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3137 return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
3140 case AArch64ISD::LD4LANEpost: {
3141 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3142 return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
3143 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3145 return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
3146 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3148 return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
3149 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3151 return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
3154 case AArch64ISD::ST2post: {
3155 VT = Node->getOperand(1).getValueType();
3156 if (VT == MVT::v8i8)
3157 return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
3158 else if (VT == MVT::v16i8)
3159 return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
3160 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3161 return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
3162 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3163 return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
3164 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3165 return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
3166 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3167 return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
3168 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3169 return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
3170 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3171 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
3174 case AArch64ISD::ST3post: {
3175 VT = Node->getOperand(1).getValueType();
3176 if (VT == MVT::v8i8)
3177 return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
3178 else if (VT == MVT::v16i8)
3179 return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
3180 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3181 return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
3182 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3183 return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
3184 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3185 return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
3186 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3187 return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
3188 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3189 return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
3190 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3191 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    break;
  }
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
    break;
  }
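
  // Post-indexed single-lane stores below: only the element size matters, so
  // 64-bit and 128-bit vector types with the same element width share one
  // ST2i*/ST3i*/ST4i*_POST opcode.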
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
    break;
  }
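
  // Give the hand-written floating-point selectors below a chance before
  // falling back to the TableGen-generated matcher at the bottom of Select().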
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;

  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    if (SDNode *I = SelectFPConvertWithRound(Node))
      return I;
    break;
  }

  // Select the default instruction
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createAArch64ISelDag - This pass converts a legalized DAG into a
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}
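
// Illustrative call site only (the authoritative one lives in the target's
// pass configuration, e.g. AArch64TargetMachine.cpp):
//
//   bool AArch64PassConfig::addInstSelector() {
//     addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
//     ...
//     return false;
//   }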