1 //===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the AArch64 target.
12 //===----------------------------------------------------------------------===//
14 #include "AArch64TargetMachine.h"
15 #include "MCTargetDesc/AArch64AddressingModes.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/CodeGen/SelectionDAGISel.h"
18 #include "llvm/IR/Function.h" // To access function attributes.
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
28 #define DEBUG_TYPE "aarch64-isel"
30 //===--------------------------------------------------------------------===//
31 /// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
32 /// instructions for SelectionDAG operations.
36 class AArch64DAGToDAGISel : public SelectionDAGISel {
37 AArch64TargetMachine &TM;
39 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
40 /// make the right decision when generating code for different targets.
41 const AArch64Subtarget *Subtarget;
46 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
47 CodeGenOpt::Level OptLevel)
48 : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
51 const char *getPassName() const override {
52 return "AArch64 Instruction Selection";
55 bool runOnMachineFunction(MachineFunction &MF) override {
57 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
58 MF.getFunction()->hasFnAttribute(Attribute::MinSize);
59 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
60 return SelectionDAGISel::runOnMachineFunction(MF);
63 SDNode *Select(SDNode *Node) override;
65 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
66 /// inline asm expressions.
67 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
68 unsigned ConstraintID,
69 std::vector<SDValue> &OutOps) override;
71 SDNode *SelectMLAV64LaneV128(SDNode *N);
72 SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
73 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
74 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
75 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
76 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
77 return SelectShiftedRegister(N, false, Reg, Shift);
79 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
80 return SelectShiftedRegister(N, true, Reg, Shift);
82 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
83 return SelectAddrModeIndexed(N, 1, Base, OffImm);
85 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
86 return SelectAddrModeIndexed(N, 2, Base, OffImm);
88 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
89 return SelectAddrModeIndexed(N, 4, Base, OffImm);
91 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
92 return SelectAddrModeIndexed(N, 8, Base, OffImm);
94 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
95 return SelectAddrModeIndexed(N, 16, Base, OffImm);
97 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
98 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
100 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
101 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
103 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
104 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
106 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
107 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
109 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
110 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
114 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
115 SDValue &SignExtend, SDValue &DoShift) {
116 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
120 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
121 SDValue &SignExtend, SDValue &DoShift) {
122 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
126 /// Form sequences of consecutive 64/128-bit registers for use in NEON
127 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
128 /// between 1 and 4 elements. If it contains a single element, that element is
129 /// returned unchanged; otherwise a REG_SEQUENCE value is returned.
130 SDValue createDTuple(ArrayRef<SDValue> Vecs);
131 SDValue createQTuple(ArrayRef<SDValue> Vecs);
133 /// Generic helper for the createDTuple/createQTuple
134 /// functions. Those should almost always be called instead.
135 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
136 const unsigned SubRegs[]);
138 SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
140 SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
142 SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
144 SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
146 SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
147 SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
149 SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
150 SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
151 SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
152 SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
154 SDNode *SelectBitfieldExtractOp(SDNode *N);
155 SDNode *SelectBitfieldInsertOp(SDNode *N);
157 SDNode *SelectLIBM(SDNode *N);
159 SDNode *SelectReadRegister(SDNode *N);
160 SDNode *SelectWriteRegister(SDNode *N);
162 // Include the pieces autogenerated from the target description.
163 #include "AArch64GenDAGISel.inc"
166 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
168 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
170 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
172 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
173 SDValue &Offset, SDValue &SignExtend,
175 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
176 SDValue &Offset, SDValue &SignExtend,
178 bool isWorthFolding(SDValue V) const;
179 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
180 SDValue &Offset, SDValue &SignExtend);
182 template<unsigned RegWidth>
183 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
184 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
187 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
189 } // end anonymous namespace
191 /// isIntImmediate - This method tests to see if the node is a constant
192 /// operand. If so, Imm will receive the 64-bit value.
193 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
194 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
195 Imm = C->getZExtValue();
201 // isIntImmediate - This method tests to see if the operand is a constant.
202 // If so, Imm will receive the value.
203 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
204 return isIntImmediate(N.getNode(), Imm);
207 // isOpcWithIntImmediate - This method tests to see if the node has a specific
208 // opcode and that it has an immediate integer right operand.
209 // If so, Imm will receive the immediate value.
210 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
212 return N->getOpcode() == Opc &&
213 isIntImmediate(N->getOperand(1).getNode(), Imm);
216 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
217 const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
218 switch(ConstraintID) {
220 llvm_unreachable("Unexpected asm memory constraint");
221 case InlineAsm::Constraint_i:
222 case InlineAsm::Constraint_m:
223 case InlineAsm::Constraint_Q:
224 // Require the address to be in a register. That is safe for all AArch64
225 // variants and it is hard to do anything much smarter without knowing
226 // how the operand is used.
227 OutOps.push_back(Op);
233 /// SelectArithImmed - Select an immediate value that can be represented as
234 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
235 /// Val set to the 12-bit value and Shift set to the shifter operand.
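/// For example (illustrative values): #0x123 matches with Val = 0x123 and
/// Shift = LSL #0, #0x123000 matches with Val = 0x123 and Shift = LSL #12,
/// and a value such as #0x123456 cannot be matched at all.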
236 bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
238 // This function is called from the addsub_shifted_imm ComplexPattern,
239 // which lists [imm] as the list of opcodes it is interested in; however,
240 // we still need to check whether the operand is actually an immediate
241 // here because the ComplexPattern opcode list is only used in
242 // root-level opcode matching.
243 if (!isa<ConstantSDNode>(N.getNode()))
246 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
249 if (Immed >> 12 == 0) {
251 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
257 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
259 Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
260 Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
264 /// SelectNegArithImmed - As above, but negates the value before trying to
265 /// select it.
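/// For example (illustrative), this lets an add of #-16 be selected as the
/// equivalent subtract of #16 once the immediate has been negated.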
266 bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
268 // This function is called from the addsub_shifted_imm ComplexPattern,
269 // which lists [imm] as the list of opcodes it is interested in; however,
270 // we still need to check whether the operand is actually an immediate
271 // here because the ComplexPattern opcode list is only used in
272 // root-level opcode matching.
273 if (!isa<ConstantSDNode>(N.getNode()))
276 // The immediate operand must be a 24-bit zero-extended immediate.
277 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
279 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
280 // have the opposite effect on the C flag, so this pattern mustn't match under
281 // those circumstances.
285 if (N.getValueType() == MVT::i32)
286 Immed = ~((uint32_t)Immed) + 1;
288 Immed = ~Immed + 1ULL;
289 if (Immed & 0xFFFFFFFFFF000000ULL)
292 Immed &= 0xFFFFFFULL;
293 return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
297 /// getShiftTypeForNode - Translate a shift node to the corresponding
298 /// ShiftType value.
299 static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
300 switch (N.getOpcode()) {
302 return AArch64_AM::InvalidShiftExtend;
304 return AArch64_AM::LSL;
306 return AArch64_AM::LSR;
308 return AArch64_AM::ASR;
310 return AArch64_AM::ROR;
314 /// \brief Determine whether it is worth folding V into an extended register.
315 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
316 // It hurts if the value is used at least twice, unless we are optimizing
317 // for code size.
318 if (ForCodeSize || V.hasOneUse())
323 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
324 /// is not shifted, set the Shift operand to the default of "LSL 0". The logical
325 /// instructions allow the shifted register to be rotated, but the arithmetic
326 /// instructions do not. The AllowROR parameter specifies whether ROR is
327 /// allowed.
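/// For example (illustrative operands), an (or x0, (shl x1, 4)) can be folded
/// into a single ORR using the shifted-register operand "x1, LSL #4".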
328 bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
329 SDValue &Reg, SDValue &Shift) {
330 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
331 if (ShType == AArch64_AM::InvalidShiftExtend)
333 if (!AllowROR && ShType == AArch64_AM::ROR)
336 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
337 unsigned BitSize = N.getValueType().getSizeInBits();
338 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
339 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
341 Reg = N.getOperand(0);
342 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
343 return isWorthFolding(N);
349 /// getExtendTypeForNode - Translate an extend node to the corresponding
350 /// ExtendType value.
351 static AArch64_AM::ShiftExtendType
352 getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
353 if (N.getOpcode() == ISD::SIGN_EXTEND ||
354 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
356 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
357 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
359 SrcVT = N.getOperand(0).getValueType();
361 if (!IsLoadStore && SrcVT == MVT::i8)
362 return AArch64_AM::SXTB;
363 else if (!IsLoadStore && SrcVT == MVT::i16)
364 return AArch64_AM::SXTH;
365 else if (SrcVT == MVT::i32)
366 return AArch64_AM::SXTW;
367 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
369 return AArch64_AM::InvalidShiftExtend;
370 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
371 N.getOpcode() == ISD::ANY_EXTEND) {
372 EVT SrcVT = N.getOperand(0).getValueType();
373 if (!IsLoadStore && SrcVT == MVT::i8)
374 return AArch64_AM::UXTB;
375 else if (!IsLoadStore && SrcVT == MVT::i16)
376 return AArch64_AM::UXTH;
377 else if (SrcVT == MVT::i32)
378 return AArch64_AM::UXTW;
379 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
381 return AArch64_AM::InvalidShiftExtend;
382 } else if (N.getOpcode() == ISD::AND) {
383 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
385 return AArch64_AM::InvalidShiftExtend;
386 uint64_t AndMask = CSD->getZExtValue();
390 return AArch64_AM::InvalidShiftExtend;
392 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
394 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
396 return AArch64_AM::UXTW;
400 return AArch64_AM::InvalidShiftExtend;
403 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
404 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
405 if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
406 DL->getOpcode() != AArch64ISD::DUPLANE32)
409 SDValue SV = DL->getOperand(0);
410 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
413 SDValue EV = SV.getOperand(1);
414 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
417 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
418 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
419 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
420 LaneOp = EV.getOperand(0);
425 // Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
426 // high lane extract.
427 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
428 SDValue &LaneOp, int &LaneIdx) {
430 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
432 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
439 /// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
440 /// is a lane in the upper half of a 128-bit vector. Recognize and select this
441 /// so that we don't emit unnecessary lane extracts.
442 SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
444 SDValue Op0 = N->getOperand(0);
445 SDValue Op1 = N->getOperand(1);
446 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
447 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
448 int LaneIdx = -1; // Will hold the lane index.
450 if (Op1.getOpcode() != ISD::MUL ||
451 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
454 if (Op1.getOpcode() != ISD::MUL ||
455 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
460 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
462 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
464 unsigned MLAOpc = ~0U;
466 switch (N->getSimpleValueType(0).SimpleTy) {
468 llvm_unreachable("Unrecognized MLA.");
470 MLAOpc = AArch64::MLAv4i16_indexed;
473 MLAOpc = AArch64::MLAv8i16_indexed;
476 MLAOpc = AArch64::MLAv2i32_indexed;
479 MLAOpc = AArch64::MLAv4i32_indexed;
483 return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
486 SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
492 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
496 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
498 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
500 unsigned SMULLOpc = ~0U;
502 if (IntNo == Intrinsic::aarch64_neon_smull) {
503 switch (N->getSimpleValueType(0).SimpleTy) {
505 llvm_unreachable("Unrecognized SMULL.");
507 SMULLOpc = AArch64::SMULLv4i16_indexed;
510 SMULLOpc = AArch64::SMULLv2i32_indexed;
513 } else if (IntNo == Intrinsic::aarch64_neon_umull) {
514 switch (N->getSimpleValueType(0).SimpleTy) {
516 llvm_unreachable("Unrecognized UMULL.");
518 SMULLOpc = AArch64::UMULLv4i16_indexed;
521 SMULLOpc = AArch64::UMULLv2i32_indexed;
525 llvm_unreachable("Unrecognized intrinsic.");
527 return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
530 /// Instructions that accept extend modifiers like UXTW expect the register
531 /// being extended to be a GPR32, but the incoming DAG might be acting on a
532 /// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
533 /// this is the case.
534 static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
535 if (N.getValueType() == MVT::i32)
539 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
540 MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
541 dl, MVT::i32, N, SubReg);
542 return SDValue(Node, 0);
546 /// SelectArithExtendedRegister - Select an "extended register" operand. This
547 /// operand folds in an extend followed by an optional left shift.
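/// For example (illustrative), an add whose RHS is ((zext i8 value) << 2) can
/// be selected as a single "ADD Xd, Xn, Wm, UXTB #2" (register names are
/// hypothetical).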
548 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
550 unsigned ShiftVal = 0;
551 AArch64_AM::ShiftExtendType Ext;
553 if (N.getOpcode() == ISD::SHL) {
554 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
557 ShiftVal = CSD->getZExtValue();
561 Ext = getExtendTypeForNode(N.getOperand(0));
562 if (Ext == AArch64_AM::InvalidShiftExtend)
565 Reg = N.getOperand(0).getOperand(0);
567 Ext = getExtendTypeForNode(N);
568 if (Ext == AArch64_AM::InvalidShiftExtend)
571 Reg = N.getOperand(0);
574 // AArch64 mandates that the RHS of the operation must use the smallest
575 // register class that could contain the size being extended from. Thus,
576 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
577 // there might not be an actual 32-bit value in the program. We can
578 // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
579 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
580 Reg = narrowIfNeeded(CurDAG, Reg);
581 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
583 return isWorthFolding(N);
586 /// If there's a use of this ADDlow that's not itself a load/store then we'll
587 /// need to create a real ADD instruction from it anyway and there's no point in
588 /// folding it into the mem op. Theoretically, it shouldn't matter, but there's
589 /// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
590 /// leads to duplicated ADRP instructions.
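/// For example (illustrative), when every user is a load or store the low-half
/// offset can be folded directly into the memory access:
///     adrp x0, symbol
///     ldr  w1, [x0, :lo12:symbol]
/// rather than materializing the full address with a separate ADD first.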
591 static bool isWorthFoldingADDlow(SDValue N) {
592 for (auto Use : N->uses()) {
593 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
594 Use->getOpcode() != ISD::ATOMIC_LOAD &&
595 Use->getOpcode() != ISD::ATOMIC_STORE)
598 // ldar and stlr have much more restrictive addressing modes (just a
599 // register).
600 if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
607 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
608 /// immediate" address. The "Size" argument is the size in bytes of the memory
609 /// reference, which determines the scale.
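/// For example (illustrative), with Size == 8 a (load (add x0, #16)) can use
/// the scaled form LDR x1, [x0, #16]: the offset is encoded as 16 >> 3 == 2,
/// and only offsets in [0, 32760] that are multiples of 8 qualify.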
610 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
611 SDValue &Base, SDValue &OffImm) {
613 const TargetLowering *TLI = getTargetLowering();
614 if (N.getOpcode() == ISD::FrameIndex) {
615 int FI = cast<FrameIndexSDNode>(N)->getIndex();
616 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
617 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
621 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
622 GlobalAddressSDNode *GAN =
623 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
624 Base = N.getOperand(0);
625 OffImm = N.getOperand(1);
629 const GlobalValue *GV = GAN->getGlobal();
630 unsigned Alignment = GV->getAlignment();
631 const DataLayout *DL = TLI->getDataLayout();
632 Type *Ty = GV->getType()->getElementType();
633 if (Alignment == 0 && Ty->isSized())
634 Alignment = DL->getABITypeAlignment(Ty);
636 if (Alignment >= Size)
640 if (CurDAG->isBaseWithConstantOffset(N)) {
641 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
642 int64_t RHSC = (int64_t)RHS->getZExtValue();
643 unsigned Scale = Log2_32(Size);
644 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
645 Base = N.getOperand(0);
646 if (Base.getOpcode() == ISD::FrameIndex) {
647 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
648 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
650 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
656 // Before falling back to our general case, check if the unscaled
657 // instructions can handle this. If so, that's preferable.
658 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
661 // Base only. The address will be materialized into a register before
662 // the memory is accessed.
663 // add x0, Xbase, #offset
666 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
670 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
671 /// immediate" address. This should only match when there is an offset that
672 /// is not valid for a scaled immediate addressing mode. The "Size" argument
673 /// is the size in bytes of the memory reference, which is needed here to know
674 /// what is valid for a scaled immediate.
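/// For example (illustrative), with Size == 8 an offset of #-16 is rejected by
/// the scaled form above but fits the signed 9-bit range accepted here, so the
/// access can be selected as LDUR x1, [x0, #-16].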
675 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
678 if (!CurDAG->isBaseWithConstantOffset(N))
680 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
681 int64_t RHSC = RHS->getSExtValue();
682 // If the offset is valid as a scaled immediate, don't match here.
683 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
684 RHSC < (0x1000 << Log2_32(Size)))
686 if (RHSC >= -256 && RHSC < 256) {
687 Base = N.getOperand(0);
688 if (Base.getOpcode() == ISD::FrameIndex) {
689 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
690 const TargetLowering *TLI = getTargetLowering();
691 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
693 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
700 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
702 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
703 SDValue ImpDef = SDValue(
704 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
705 MachineSDNode *Node = CurDAG->getMachineNode(
706 TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
707 return SDValue(Node, 0);
710 /// \brief Check if the given SHL node (\p N) can be used to form an
711 /// extended register for an addressing mode.
712 bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
713 bool WantExtend, SDValue &Offset,
714 SDValue &SignExtend) {
715 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
716 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
717 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
722 AArch64_AM::ShiftExtendType Ext =
723 getExtendTypeForNode(N.getOperand(0), true);
724 if (Ext == AArch64_AM::InvalidShiftExtend)
727 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
728 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
731 Offset = N.getOperand(0);
732 SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
735 unsigned LegalShiftVal = Log2_32(Size);
736 unsigned ShiftVal = CSD->getZExtValue();
738 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
741 if (isWorthFolding(N))
747 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
748 SDValue &Base, SDValue &Offset,
751 if (N.getOpcode() != ISD::ADD)
753 SDValue LHS = N.getOperand(0);
754 SDValue RHS = N.getOperand(1);
757 // We don't want to match immediate adds here, because they are better lowered
758 // to the register-immediate addressing modes.
759 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
762 // Check if this particular node is reused in any non-memory related
763 // operation. If yes, do not try to fold this node into the address
764 // computation, since the computation will be kept.
765 const SDNode *Node = N.getNode();
766 for (SDNode *UI : Node->uses()) {
767 if (!isa<MemSDNode>(*UI))
771 // Remember if it is worth folding N when it produces an extended register.
772 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
774 // Try to match a shifted extend on the RHS.
775 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
776 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
778 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
782 // Try to match a shifted extend on the LHS.
783 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
784 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
786 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
790 // There was no shift, whatever else we find.
791 DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
793 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
794 // Try to match an unshifted extend on the LHS.
795 if (IsExtendedRegisterWorthFolding &&
796 (Ext = getExtendTypeForNode(LHS, true)) !=
797 AArch64_AM::InvalidShiftExtend) {
799 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
800 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
802 if (isWorthFolding(LHS))
806 // Try to match an unshifted extend on the RHS.
807 if (IsExtendedRegisterWorthFolding &&
808 (Ext = getExtendTypeForNode(RHS, true)) !=
809 AArch64_AM::InvalidShiftExtend) {
811 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
812 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
814 if (isWorthFolding(RHS))
821 // Check if the given immediate is preferred by ADD. If an immediate can be
822 // encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
823 // encoded by one MOVZ, return true.
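// For example (illustrative values): 0xabc is encodable in a plain ADD and
// 0x123000 in an "ADD ..., LSL #12", so both are preferred; 0x120000 is not,
// since a single "MOVZ ..., #0x12, LSL #16" already materializes it.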
824 static bool isPreferredADD(int64_t ImmOff) {
825 // Constant in [0x0, 0xfff] can be encoded in ADD.
826 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
828 // Check if it can be encoded in an "ADD LSL #12".
829 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
830 // As a single MOVZ is faster than an "ADD ..., LSL #12", ignore such constants.
831 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
832 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
836 bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
837 SDValue &Base, SDValue &Offset,
840 if (N.getOpcode() != ISD::ADD)
842 SDValue LHS = N.getOperand(0);
843 SDValue RHS = N.getOperand(1);
846 // Check if this particular node is reused in any non-memory related
847 // operation. If yes, do not try to fold this node into the address
848 // computation, since the computation will be kept.
849 const SDNode *Node = N.getNode();
850 for (SDNode *UI : Node->uses()) {
851 if (!isa<MemSDNode>(*UI))
855 // Watch out if RHS is a wide immediate: it cannot be selected into the
856 // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
857 // either. Instead, the [BaseReg + 0] addressing mode would be used, generating
858 // instructions like:
859 //   MOV X0, WideImmediate
860 //   ADD X1, BaseReg, X0
861 //   LDR X2, [X1, 0]
862 // For such a situation, using the [BaseReg, XReg] addressing mode saves one
863 // ADD/SUB:
864 //   MOV X0, WideImmediate
865 //   LDR X2, [BaseReg, X0]
866 if (isa<ConstantSDNode>(RHS)) {
867 int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
868 unsigned Scale = Log2_32(Size);
869 // Skip immediates that can be selected in the load/store addressing mode.
870 // Also skip immediates that can be encoded by a single ADD (SUB is also
871 // checked by using -ImmOff).
872 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
873 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
876 SDValue Ops[] = { RHS };
878 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
879 SDValue MOVIV = SDValue(MOVI, 0);
880 // This ADD of two X registers will be selected into [Reg+Reg] mode.
881 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
884 // Remember if it is worth folding N when it produces an extended register.
885 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
887 // Try to match a shifted extend on the RHS.
888 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
889 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
891 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
895 // Try to match a shifted extend on the LHS.
896 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
897 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
899 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
903 // Match any non-shifted, non-extend, non-immediate add expression.
906 SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
907 DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
908 // Reg1 + Reg2 is free: no check needed.
912 SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
913 static const unsigned RegClassIDs[] = {
914 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
915 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
916 AArch64::dsub2, AArch64::dsub3};
918 return createTuple(Regs, RegClassIDs, SubRegs);
921 SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
922 static const unsigned RegClassIDs[] = {
923 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
924 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
925 AArch64::qsub2, AArch64::qsub3};
927 return createTuple(Regs, RegClassIDs, SubRegs);
930 SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
931 const unsigned RegClassIDs[],
932 const unsigned SubRegs[]) {
933 // There's no special register-class for a vector-list of 1 element: it's just
934 // a vector.
935 if (Regs.size() == 1)
938 assert(Regs.size() >= 2 && Regs.size() <= 4);
942 SmallVector<SDValue, 4> Ops;
944 // First operand of REG_SEQUENCE is the desired RegClass.
946 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
948 // Then we get pairs of source & subregister-position for the components.
949 for (unsigned i = 0; i < Regs.size(); ++i) {
950 Ops.push_back(Regs[i]);
951 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
955 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
956 return SDValue(N, 0);
959 SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
960 unsigned Opc, bool isExt) {
962 EVT VT = N->getValueType(0);
964 unsigned ExtOff = isExt;
966 // Form a REG_SEQUENCE to force register allocation.
967 unsigned Vec0Off = ExtOff + 1;
968 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
969 N->op_begin() + Vec0Off + NumVecs);
970 SDValue RegSeq = createQTuple(Regs);
972 SmallVector<SDValue, 6> Ops;
974 Ops.push_back(N->getOperand(1));
975 Ops.push_back(RegSeq);
976 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
977 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
980 SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
981 LoadSDNode *LD = cast<LoadSDNode>(N);
982 if (LD->isUnindexed())
984 EVT VT = LD->getMemoryVT();
985 EVT DstVT = N->getValueType(0);
986 ISD::MemIndexedMode AM = LD->getAddressingMode();
987 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
989 // We're not doing validity checking here. That was done when checking
990 // if we should mark the load as indexed or not. We're just selecting
991 // the right instruction.
994 ISD::LoadExtType ExtType = LD->getExtensionType();
995 bool InsertTo64 = false;
997 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
998 else if (VT == MVT::i32) {
999 if (ExtType == ISD::NON_EXTLOAD)
1000 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1001 else if (ExtType == ISD::SEXTLOAD)
1002 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1004 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1006 // The result of the load is only i32. It's the subreg_to_reg that makes
1007 // it into an i64.
1010 } else if (VT == MVT::i16) {
1011 if (ExtType == ISD::SEXTLOAD) {
1012 if (DstVT == MVT::i64)
1013 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1015 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1017 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1018 InsertTo64 = DstVT == MVT::i64;
1019 // The result of the load is only i32. It's the subreg_to_reg that makes
1020 // it into an i64.
1023 } else if (VT == MVT::i8) {
1024 if (ExtType == ISD::SEXTLOAD) {
1025 if (DstVT == MVT::i64)
1026 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1028 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1030 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1031 InsertTo64 = DstVT == MVT::i64;
1032 // The result of the load is only i32. It's the subreg_to_reg that makes
1033 // it into an i64.
1036 } else if (VT == MVT::f32) {
1037 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1038 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1039 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1040 } else if (VT.is128BitVector()) {
1041 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1044 SDValue Chain = LD->getChain();
1045 SDValue Base = LD->getBasePtr();
1046 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1047 int OffsetVal = (int)OffsetOp->getZExtValue();
1049 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
1050 SDValue Ops[] = { Base, Offset, Chain };
1051 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
1053 // Either way, we're replacing the node, so tell the caller that.
1055 SDValue LoadedVal = SDValue(Res, 1);
1057 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1059 SDValue(CurDAG->getMachineNode(
1060 AArch64::SUBREG_TO_REG, dl, MVT::i64,
1061 CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
1066 ReplaceUses(SDValue(N, 0), LoadedVal);
1067 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1068 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1073 SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
1074 unsigned Opc, unsigned SubRegIdx) {
1076 EVT VT = N->getValueType(0);
1077 SDValue Chain = N->getOperand(0);
1079 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1082 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1084 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1085 SDValue SuperReg = SDValue(Ld, 0);
1086 for (unsigned i = 0; i < NumVecs; ++i)
1087 ReplaceUses(SDValue(N, i),
1088 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1090 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1094 SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1095 unsigned Opc, unsigned SubRegIdx) {
1097 EVT VT = N->getValueType(0);
1098 SDValue Chain = N->getOperand(0);
1100 SDValue Ops[] = {N->getOperand(1), // Mem operand
1101 N->getOperand(2), // Incremental
1104 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1105 MVT::Untyped, MVT::Other};
1107 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1109 // Update uses of write back register
1110 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1112 // Update uses of vector list
1113 SDValue SuperReg = SDValue(Ld, 1);
1115 ReplaceUses(SDValue(N, 0), SuperReg);
1117 for (unsigned i = 0; i < NumVecs; ++i)
1118 ReplaceUses(SDValue(N, i),
1119 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1122 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1126 SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1129 EVT VT = N->getOperand(2)->getValueType(0);
1131 // Form a REG_SEQUENCE to force register allocation.
1132 bool Is128Bit = VT.getSizeInBits() == 128;
1133 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1134 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1136 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
1137 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1142 SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1145 EVT VT = N->getOperand(2)->getValueType(0);
1146 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1147 MVT::Other}; // Type for the Chain
1149 // Form a REG_SEQUENCE to force register allocation.
1150 bool Is128Bit = VT.getSizeInBits() == 128;
1151 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1152 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1154 SDValue Ops[] = {RegSeq,
1155 N->getOperand(NumVecs + 1), // base register
1156 N->getOperand(NumVecs + 2), // Incremental
1157 N->getOperand(0)}; // Chain
1158 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1164 /// WidenVector - Given a value in the V64 register class, produce the
1165 /// equivalent value in the V128 register class.
1170 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1172 SDValue operator()(SDValue V64Reg) {
1173 EVT VT = V64Reg.getValueType();
1174 unsigned NarrowSize = VT.getVectorNumElements();
1175 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1176 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1180 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1181 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1186 /// NarrowVector - Given a value in the V128 register class, produce the
1187 /// equivalent value in the V64 register class.
1188 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1189 EVT VT = V128Reg.getValueType();
1190 unsigned WideSize = VT.getVectorNumElements();
1191 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1192 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1194 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1198 SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1201 EVT VT = N->getValueType(0);
1202 bool Narrow = VT.getSizeInBits() == 64;
1204 // Form a REG_SEQUENCE to force register allocation.
1205 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1208 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1209 WidenVector(*CurDAG));
1211 SDValue RegSeq = createQTuple(Regs);
1213 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1216 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1218 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1219 N->getOperand(NumVecs + 3), N->getOperand(0)};
1220 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1221 SDValue SuperReg = SDValue(Ld, 0);
1223 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1224 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1226 for (unsigned i = 0; i < NumVecs; ++i) {
1227 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1229 NV = NarrowVector(NV, *CurDAG);
1230 ReplaceUses(SDValue(N, i), NV);
1233 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1238 SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1241 EVT VT = N->getValueType(0);
1242 bool Narrow = VT.getSizeInBits() == 64;
1244 // Form a REG_SEQUENCE to force register allocation.
1245 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1248 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1249 WidenVector(*CurDAG));
1251 SDValue RegSeq = createQTuple(Regs);
1253 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1254 RegSeq->getValueType(0), MVT::Other};
1257 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1259 SDValue Ops[] = {RegSeq,
1260 CurDAG->getTargetConstant(LaneNo, dl,
1261 MVT::i64), // Lane Number
1262 N->getOperand(NumVecs + 2), // Base register
1263 N->getOperand(NumVecs + 3), // Incremental
1265 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1267 // Update uses of the write back register
1268 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1270 // Update uses of the vector list
1271 SDValue SuperReg = SDValue(Ld, 1);
1273 ReplaceUses(SDValue(N, 0),
1274 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1276 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1277 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1279 for (unsigned i = 0; i < NumVecs; ++i) {
1280 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1283 NV = NarrowVector(NV, *CurDAG);
1284 ReplaceUses(SDValue(N, i), NV);
1289 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1294 SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1297 EVT VT = N->getOperand(2)->getValueType(0);
1298 bool Narrow = VT.getSizeInBits() == 64;
1300 // Form a REG_SEQUENCE to force register allocation.
1301 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1304 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1305 WidenVector(*CurDAG));
1307 SDValue RegSeq = createQTuple(Regs);
1310 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1312 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1313 N->getOperand(NumVecs + 3), N->getOperand(0)};
1314 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1316 // Transfer memoperands.
1317 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1318 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1319 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1324 SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1327 EVT VT = N->getOperand(2)->getValueType(0);
1328 bool Narrow = VT.getSizeInBits() == 64;
1330 // Form a REG_SEQUENCE to force register allocation.
1331 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1334 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1335 WidenVector(*CurDAG));
1337 SDValue RegSeq = createQTuple(Regs);
1339 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1343 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1345 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1346 N->getOperand(NumVecs + 2), // Base Register
1347 N->getOperand(NumVecs + 3), // Incremental
1349 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1351 // Transfer memoperands.
1352 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1353 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1354 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1359 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1360 unsigned &Opc, SDValue &Opd0,
1361 unsigned &LSB, unsigned &MSB,
1362 unsigned NumberOfIgnoredLowBits,
1363 bool BiggerPattern) {
1364 assert(N->getOpcode() == ISD::AND &&
1365 "N must be a AND operation to call this function");
1367 EVT VT = N->getValueType(0);
1369 // We could test the type of VT here and return false when it does not
1370 // match, but since that check is done prior to this call in the current
1371 // context, we turn it into an assert to avoid redundant code.
1372 assert((VT == MVT::i32 || VT == MVT::i64) &&
1373 "Type checking must have been done before calling this function");
1375 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1376 // changed the AND node to a 32-bit mask operation. We'll have to
1377 // undo that as part of the transform here if we want to catch all
1378 // the opportunities.
1379 // Currently the NumberOfIgnoredLowBits argument helps to recover
1380 // from these situations when matching the bigger pattern (bitfield insert).
1382 // For unsigned extracts, check for a shift right and mask
1383 uint64_t And_imm = 0;
1384 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1387 const SDNode *Op0 = N->getOperand(0).getNode();
1389 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1390 // simplified. Try to undo that
1391 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1393 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1394 if (And_imm & (And_imm + 1))
1397 bool ClampMSB = false;
1398 uint64_t Srl_imm = 0;
1399 // Handle the SRL + ANY_EXTEND case.
1400 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1401 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1402 // Extend the incoming operand of the SRL to 64-bit.
1403 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1404 // Make sure to clamp the MSB so that we preserve the semantics of the
1405 // original operations.
1407 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1408 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1410 // If the shift result was truncated, we can still combine them.
1411 Opd0 = Op0->getOperand(0).getOperand(0);
1413 // Use the type of SRL node.
1414 VT = Opd0->getValueType(0);
1415 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1416 Opd0 = Op0->getOperand(0);
1417 } else if (BiggerPattern) {
1418 // Let's pretend a 0 shift right has been performed.
1419 // The resulting code will be at least as good as the original one
1420 // plus it may expose more opportunities for the bitfield insert pattern.
1421 // FIXME: Currently we limit this to the bigger pattern, because
1422 // some optimizations expect AND and not UBFM
1423 Opd0 = N->getOperand(0);
1427 // Bail out on large immediates. This happens when no proper
1428 // combining/constant folding was performed.
1429 if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
1431 << ": Found large shift immediate, this should not happen\n"));
1436 MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
1437 : countTrailingOnes<uint64_t>(And_imm)) -
1440 // Since we're moving the extend before the right shift operation, we need
1441 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1442 // the zeros which would get shifted in with the original right shift
1443 // operation.
1444 MSB = MSB > 31 ? 31 : MSB;
1446 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1450 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1451 SDValue &Opd0, unsigned &LSB,
1453 // We are looking for the following pattern, which basically extracts several
1454 // contiguous bits from the source value and places them at the LSB of the
1455 // destination value; all other bits of the destination value are set to zero:
1457 // Value2 = AND Value, MaskImm
1458 // SRL Value2, ShiftImm
1460 // where MaskImm >> ShiftImm is used to determine the bit width.
1462 // This gets selected into a single UBFM:
1464 // UBFM Value, ShiftImm, BitWide + Srl_imm - 1
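// For example (illustrative), (srl (and x, 0xff0), 4) extracts bits [11:4]
// of x and is selected as "UBFM x, #4, #11", i.e. a UBFX with lsb 4 and
// width 8.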
1467 if (N->getOpcode() != ISD::SRL)
1470 uint64_t And_mask = 0;
1471 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1474 Opd0 = N->getOperand(0).getOperand(0);
1476 uint64_t Srl_imm = 0;
1477 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1480 // Check whether we really have a multi-bit extract here.
1481 unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
1482 if (BitWide && isMask_64(And_mask >> Srl_imm)) {
1483 if (N->getValueType(0) == MVT::i32)
1484 Opc = AArch64::UBFMWri;
1486 Opc = AArch64::UBFMXri;
1489 MSB = BitWide + Srl_imm - 1;
1496 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1497 unsigned &LSB, unsigned &MSB,
1498 bool BiggerPattern) {
1499 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1500 "N must be a SHR/SRA operation to call this function");
1502 EVT VT = N->getValueType(0);
1504 // We could test the type of VT here and return false when it does not
1505 // match, but since that check is done prior to this call in the current
1506 // context, we turn it into an assert to avoid redundant code.
1507 assert((VT == MVT::i32 || VT == MVT::i64) &&
1508 "Type checking must have been done before calling this function");
1510 // Check for AND + SRL doing several bits extract.
1511 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
1514 // we're looking for a shift of a shift
1515 uint64_t Shl_imm = 0;
1516 uint64_t Trunc_bits = 0;
1517 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1518 Opd0 = N->getOperand(0).getOperand(0);
1519 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1520 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1521 // We are looking for a shift of a truncate. A truncate from i64 to i32 can
1522 // be considered as setting the high 32 bits to zero. Our strategy here is to
1523 // always generate a 64-bit UBFM. This consistency will help the CSE pass
1524 // later find more redundancy.
1525 Opd0 = N->getOperand(0).getOperand(0);
1526 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1527 VT = Opd0->getValueType(0);
1528 assert(VT == MVT::i64 && "the promoted type should be i64");
1529 } else if (BiggerPattern) {
1530 // Let's pretend a 0 shift left has been performed.
1531 // FIXME: Currently we limit this to the bigger pattern case,
1532 // because some optimizations expect AND and not UBFM
1533 Opd0 = N->getOperand(0);
1537 // Missing combines/constant folding may have left us with strange
1538 // constants.
1539 if (Shl_imm >= VT.getSizeInBits()) {
1541 << ": Found large shift immediate, this should not happen\n"));
1545 uint64_t Srl_imm = 0;
1546 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1549 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1550 "bad amount in shift node!");
1551 // Note: The width operand is encoded as width-1.
1552 unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
1553 int sLSB = Srl_imm - Shl_imm;
1558 // SRA requires a signed extraction
1560 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1562 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1566 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1567 SDValue &Opd0, unsigned &LSB, unsigned &MSB,
1568 unsigned NumberOfIgnoredLowBits = 0,
1569 bool BiggerPattern = false) {
1570 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1573 switch (N->getOpcode()) {
1575 if (!N->isMachineOpcode())
1579 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
1580 NumberOfIgnoredLowBits, BiggerPattern);
1583 return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
1586 unsigned NOpc = N->getMachineOpcode();
1590 case AArch64::SBFMWri:
1591 case AArch64::UBFMWri:
1592 case AArch64::SBFMXri:
1593 case AArch64::UBFMXri:
1595 Opd0 = N->getOperand(0);
1596 LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1597 MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1604 SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1605 unsigned Opc, LSB, MSB;
1607 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
1610 EVT VT = N->getValueType(0);
1613 // If the bit extract operation is 64-bit but the original type is 32-bit, we
1614 // need to add one EXTRACT_SUBREG.
1615 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
1616 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, dl, MVT::i64),
1617 CurDAG->getTargetConstant(MSB, dl, MVT::i64)};
1619 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
1620 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1621 MachineSDNode *Node =
1622 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
1623 SDValue(BFM, 0), SubReg);
1627 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, dl, VT),
1628 CurDAG->getTargetConstant(MSB, dl, VT)};
1629 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1632 /// Does DstMask form a complementary pair with the mask provided by
1633 /// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
1634 /// this asks whether DstMask zeroes precisely those bits that will be set by
1635 /// the other half.
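/// For example (illustrative, i32 values): DstMask == 0xffff000f paired with
/// inserted bits 0x0000fff0 is complementary, whereas DstMask == 0xffff00ff is
/// not, because bits [7:4] overlap.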
1636 static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1637 unsigned NumberOfIgnoredHighBits, EVT VT) {
1638 assert((VT == MVT::i32 || VT == MVT::i64) &&
1639 "i32 or i64 mask type expected!");
1640 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1642 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1643 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1645 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1646 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
1649 // Look for bits that will be useful for later uses.
1650 // A bit is considered useless as soon as it is dropped, provided it was
1651 // never used before it was dropped.
1652 // E.g., looking for the useful bits of x:
1653 // 1. y = x & 0x7
1654 // 2. z = y >> 2
1655 // After #1, the useful bits of x are 0x7; the useful bits of x live through
1656 // y.
1657 // After #2, the useful bits of x are 0x4.
1658 // However, if x is used in an unpredictable instruction, then all its bits
1659 // are useful.
1664 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1666 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1669 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1670 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1671 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1672 getUsefulBits(Op, UsefulBits, Depth + 1);
1675 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1676 uint64_t Imm, uint64_t MSB,
1678 // inherit the bitwidth value
1679 APInt OpUsefulBits(UsefulBits);
1683 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1685 // The interesting part will be in the lower part of the result
1686 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1687 // The interesting part was starting at Imm in the argument
1688 OpUsefulBits = OpUsefulBits.shl(Imm);
1690 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1692 // The interesting part will be shifted in the result
1693 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1694 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1695 // The interesting part was at zero in the argument
1696 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1699 UsefulBits &= OpUsefulBits;
1702 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1705 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1707 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1709 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1712 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1714 uint64_t ShiftTypeAndValue =
1715 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1716 APInt Mask(UsefulBits);
1717 Mask.clearAllBits();
1720 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1722 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1723 Mask = Mask.shl(ShiftAmt);
1724 getUsefulBits(Op, Mask, Depth + 1);
1725 Mask = Mask.lshr(ShiftAmt);
1726 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1728 // We do not handle AArch64_AM::ASR, because the sign will change the
1729 // number of useful bits
1730 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1731 Mask = Mask.lshr(ShiftAmt);
1732 getUsefulBits(Op, Mask, Depth + 1);
1733 Mask = Mask.shl(ShiftAmt);
1740 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1743 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1745 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1747 if (Op.getOperand(1) == Orig)
1748 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1750 APInt OpUsefulBits(UsefulBits);
1754 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1756 UsefulBits &= ~OpUsefulBits;
1757 getUsefulBits(Op, UsefulBits, Depth + 1);
1759 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1761 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1762 getUsefulBits(Op, UsefulBits, Depth + 1);
1766 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1767 SDValue Orig, unsigned Depth) {
1769 // Users of this node should have already been instruction selected
1770 // FIXME: Can we turn that into an assert?
1771 if (!UserNode->isMachineOpcode())
1774 switch (UserNode->getMachineOpcode()) {
1777 case AArch64::ANDSWri:
1778 case AArch64::ANDSXri:
1779 case AArch64::ANDWri:
1780 case AArch64::ANDXri:
1781 // We increment Depth only when we call the getUsefulBits
1782 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1784 case AArch64::UBFMWri:
1785 case AArch64::UBFMXri:
1786 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1788 case AArch64::ORRWrs:
1789 case AArch64::ORRXrs:
1790 if (UserNode->getOperand(1) != Orig)
1792 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1794 case AArch64::BFMWri:
1795 case AArch64::BFMXri:
1796 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1800 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1803 // Initialize UsefulBits
1805 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1806 // At the beginning, assume every produced bit is useful.
1807 UsefulBits = APInt(Bitwidth, 0);
1808 UsefulBits.flipAllBits();
1810 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1812 for (SDNode *Node : Op.getNode()->uses()) {
1813 // A use cannot produce useful bits
1814 APInt UsefulBitsForUse = APInt(UsefulBits);
1815 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1816 UsersUsefulBits |= UsefulBitsForUse;
1818 // UsefulBits contains the produced bits that are meaningful for the
1819 // current definition, thus a user cannot make a bit meaningful at
1820 // this point.
1821 UsefulBits &= UsersUsefulBits;
1824 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
1825 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1826 /// 0, return Op unchanged.
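/// A purely illustrative instance (not from the original source), assuming a
/// 32-bit operand: a notional left shift by 3 is emitted as UBFMWri with
/// immediates #29 and #28 (i.e. LSL #3), while ShlAmount == -3 is emitted as
/// UBFMWri with immediates #3 and #31 (i.e. LSR #3).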
1827 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1831 EVT VT = Op.getValueType();
1833 unsigned BitWidth = VT.getSizeInBits();
1834 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1837 if (ShlAmount > 0) {
1838 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
1839 ShiftNode = CurDAG->getMachineNode(
1840 UBFMOpc, dl, VT, Op,
1841 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
1842 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
1844 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1845 assert(ShlAmount < 0 && "expected right shift");
1846 int ShrAmount = -ShlAmount;
1847 ShiftNode = CurDAG->getMachineNode(
1848 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
1849 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
1852 return SDValue(ShiftNode, 0);
1855 /// Does this tree qualify as an attempt to move a bitfield into position,
1856 /// essentially "(and (shl VAL, N), Mask)".
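/// Illustrative example (values chosen for exposition, not from the source):
/// for Op = (and (shl x, 3), 0x1f8), the non-zero bits form the shifted mask
/// 0x1f8, so the routine would report Src = x, ShiftAmount = 3 and
/// MaskWidth = 6.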
1857 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1858 SDValue &Src, int &ShiftAmount,
1860 EVT VT = Op.getValueType();
1861 unsigned BitWidth = VT.getSizeInBits();
1863 assert(BitWidth == 32 || BitWidth == 64);
1865 APInt KnownZero, KnownOne;
1866 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1868 // Non-zero in the sense that they're not provably zero, which is the key
1869 // point if we want to use this value
1870 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1872 // Discard a constant AND mask if present. It's safe because the node will
1873 // already have been factored into the computeKnownBits calculation above.
1875 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1876 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1877 Op = Op.getOperand(0);
1881 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1883 Op = Op.getOperand(0);
1885 if (!isShiftedMask_64(NonZeroBits))
1888 ShiftAmount = countTrailingZeros(NonZeroBits);
1889 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
1891 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1892 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1894 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
1899 // Given an OR operation, check if we have the following pattern:
1900 // ubfm c, b, imm, imm2 (or something that does the same job, see
1901 // isBitfieldExtractOp)
1902 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1903 //                 countTrailingZeros(mask2) == imm2 - imm + 1
1904 // f = d | c
1905 // If yes, the given reference arguments will be updated so that one can
1906 // replace the OR instruction with:
1907 // f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
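// A hypothetical 32-bit instance of the pattern: with imm = 0 and imm2 = 7,
// the UBFM extracts c = b[7:0], mask2 = 0xffffff00 clears e[7:0], and the OR
// can be replaced by a BFM with LSB = 0 and MSB = 7 (a BFXIL of the low byte
// of b into e).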
1908 static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1909 SDValue &Src, unsigned &ImmR,
1910 unsigned &ImmS, SelectionDAG *CurDAG) {
1911 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
1914 EVT VT = N->getValueType(0);
1916 Opc = AArch64::BFMWri;
1917 else if (VT == MVT::i64)
1918 Opc = AArch64::BFMXri;
1922 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1923 // have the expected shape. Try to undo that.
1925 getUsefulBits(SDValue(N, 0), UsefulBits);
1927 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1928 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1930 // OR is commutative, check both possibilities (does llvm provide a
1931 // way to do that directly, e.g., via a code matcher?)
1932 SDValue OrOpd1Val = N->getOperand(1);
1933 SDNode *OrOpd0 = N->getOperand(0).getNode();
1934 SDNode *OrOpd1 = N->getOperand(1).getNode();
1935 for (int i = 0; i < 2;
1936 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
1939 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
1940 NumberOfIgnoredLowBits, true)) {
1941 // Check that the returned opcode is compatible with the pattern,
1942 // i.e., same type and zero extended (U and not S)
1943 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
1944 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
1947 // Compute the width of the bitfield insertion
1949 Width = ImmS - ImmR + 1;
1950 // FIXME: This constraint is to catch bitfield insertion; we may want to
1951 // widen the pattern if we want to grab general bitfield insertion.
1956 // If the mask on the insertee is correct, we have a BFXIL operation. We
1957 // can share the ImmR and ImmS values from the already-computed UBFM.
1958 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
1960 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
1965 // Check the second part of the pattern
1966 EVT VT = OrOpd1->getValueType(0);
1967 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
1969 // Compute the Known Zero for the candidate of the first operand.
1970 // This allows us to catch more general cases than just looking for
1971 // an AND with an immediate. Indeed, simplify-demanded-bits may have
1972 // removed the AND instruction because it proved it was useless.
1973 APInt KnownZero, KnownOne;
1974 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
1976 // Check if there is enough room for the second operand to appear
1977 // in the first one.
1978 APInt BitsToBeInserted =
1979 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
1981 if ((BitsToBeInserted & ~KnownZero) != 0)
1984 // Set the first operand
1986 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
1987 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
1988 // In that case, we can eliminate the AND
1989 Dst = OrOpd1->getOperand(0);
1991 // Maybe the AND has been removed by simplify-demanded-bits
1992 // or is useful because it discards more bits
2002 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
2003 if (N->getOpcode() != ISD::OR)
2010 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
2013 EVT VT = N->getValueType(0);
2015 SDValue Ops[] = { Opd0,
2017 CurDAG->getTargetConstant(LSB, dl, VT),
2018 CurDAG->getTargetConstant(MSB, dl, VT) };
2019 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2022 SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
2023 EVT VT = N->getValueType(0);
2026 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
2028 if (VT == MVT::f32) {
2030 } else if (VT == MVT::f64) {
2033 return nullptr; // Unrecognized argument type. Fall back on default codegen.
2035 // Pick the FRINTX variant needed to set the flags.
2036 unsigned FRINTXOpc = FRINTXOpcs[Variant];
2038 switch (N->getOpcode()) {
2040 return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
2042 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2043 Opc = FRINTPOpcs[Variant];
2047 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2048 Opc = FRINTMOpcs[Variant];
2052 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2053 Opc = FRINTZOpcs[Variant];
2057 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2058 Opc = FRINTAOpcs[Variant];
2064 SDValue In = N->getOperand(0);
2065 SmallVector<SDValue, 2> Ops;
2068 if (!TM.Options.UnsafeFPMath) {
2069 SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
2070 Ops.push_back(SDValue(FRINTX, 1));
2073 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2077 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2078 unsigned RegWidth) {
2080 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2081 FVal = CN->getValueAPF();
2082 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2083 // Some otherwise illegal constants are allowed in this case.
2084 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2085 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2088 ConstantPoolSDNode *CN =
2089 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2090 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2094 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2095 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2096 // x-register.
2098 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2099 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2100 // integers.
2101 bool IsExact;
2103 // fbits is between 1 and 64 in the worst-case, which means the fmul
2104 // could have 2^64 as an actual operand. Need 65 bits of precision.
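// Illustrative example (not from the original source): if THIS_NODE is the
// constant 16.0 == 2^4, the conversion below yields IntVal == 16 exactly, so
// FBits == log2(16) == 4, i.e. a fixed-point conversion with four fractional
// bits.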
2105 APSInt IntVal(65, true);
2106 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2108 // N.b. isPowerOf2 also checks for > 0.
2109 if (!IsExact || !IntVal.isPowerOf2()) return false;
2110 unsigned FBits = IntVal.logBase2();
2112 // Checks above should have guaranteed that we haven't lost information in
2113 // finding FBits, but it must still be in range.
2114 if (FBits == 0 || FBits > RegWidth) return false;
2116 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
2120 // Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
2121 // of the string, obtains the integer values from them, and combines these
2122 // into a single value to be used in the MRS/MSR instruction.
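// As an illustration (field values chosen arbitrarily, not from the source):
// the string "3:3:9:12:1" would be combined as
// (3 << 14) | (3 << 11) | (9 << 7) | (12 << 3) | 1 == 0xdce1.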
2123 static int getIntOperandFromRegisterString(StringRef RegString) {
2124 SmallVector<StringRef, 5> Fields;
2125 RegString.split(Fields, ":");
2127 if (Fields.size() == 1)
2130 assert(Fields.size() == 5
2131 && "Invalid number of fields in read register string");
2133 SmallVector<int, 5> Ops;
2134 bool AllIntFields = true;
2136 for (StringRef Field : Fields) {
2138 AllIntFields &= !Field.getAsInteger(10, IntField);
2139 Ops.push_back(IntField);
2142 assert(AllIntFields &&
2143 "Unexpected non-integer value in special register string.");
2145 // Need to combine the integer fields of the string into a single value
2146 // based on the bit encoding of the MRS/MSR instruction.
2147 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2148 (Ops[3] << 3) | (Ops[4]);
2151 // Lower the read_register intrinsic to an MRS instruction node if the special
2152 // register string argument is either of the form detailed in the ACLE (the
2153 // form described in getIntOperandFromRegisterString) or is a named register
2154 // known by the MRS SysReg mapper.
2155 SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
2156 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2157 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2160 int Reg = getIntOperandFromRegisterString(RegString->getString());
2162 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2164 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2167 // Use the sysreg mapper to map the remaining possible strings to the
2168 // value for the register to be used for the instruction operand.
2169 AArch64SysReg::MRSMapper mapper;
2170 bool IsValidSpecialReg;
2171 Reg = mapper.fromString(RegString->getString(),
2172 Subtarget->getFeatureBits(),
2174 if (IsValidSpecialReg)
2175 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2177 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2183 // Lower the write_register intrinsic to an MSR instruction node if the special
2184 // register string argument is either of the form detailed in the ACLE (the
2185 // form described in getIntOperandFromRegisterString) or is a named register
2186 // known by the MSR SysReg mapper.
2187 SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
2188 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2189 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2192 int Reg = getIntOperandFromRegisterString(RegString->getString());
2194 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2195 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2196 N->getOperand(2), N->getOperand(0));
2198 // Check if the register was one of those allowed as the pstatefield value in
2199 // the MSR (immediate) instruction. To accept the values allowed in the
2200 // pstatefield for the MSR (immediate) instruction, we also require that an
2201 // immediate value has been provided as an argument; we know this is the
2202 // case, as it has been ensured by semantic checking.
2203 AArch64PState::PStateMapper PMapper;
2204 bool IsValidSpecialReg;
2205 Reg = PMapper.fromString(RegString->getString(),
2206 Subtarget->getFeatureBits(),
2208 if (IsValidSpecialReg) {
2209 assert (isa<ConstantSDNode>(N->getOperand(2))
2210 && "Expected a constant integer expression.");
2211 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2212 return CurDAG->getMachineNode(AArch64::MSRpstate, DL, MVT::Other,
2213 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2214 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
2218 // Use the sysreg mapper to attempt to map the remaining possible strings
2219 // to the value for the register to be used for the MSR (register)
2220 // instruction operand.
2221 AArch64SysReg::MSRMapper Mapper;
2222 Reg = Mapper.fromString(RegString->getString(),
2223 Subtarget->getFeatureBits(),
2226 if (IsValidSpecialReg)
2227 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2228 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2229 N->getOperand(2), N->getOperand(0));
2234 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2235 // Dump information about the Node being selected
2236 DEBUG(errs() << "Selecting: ");
2237 DEBUG(Node->dump(CurDAG));
2238 DEBUG(errs() << "\n");
2240 // If we have a custom node, we already have selected!
2241 if (Node->isMachineOpcode()) {
2242 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2243 Node->setNodeId(-1);
2247 // A few cases of custom selection.
2248 SDNode *ResNode = nullptr;
2249 EVT VT = Node->getValueType(0);
2251 switch (Node->getOpcode()) {
2255 case ISD::READ_REGISTER:
2256 if (SDNode *Res = SelectReadRegister(Node))
2260 case ISD::WRITE_REGISTER:
2261 if (SDNode *Res = SelectWriteRegister(Node))
2266 if (SDNode *I = SelectMLAV64LaneV128(Node))
2271 // Try to select as an indexed load. Fall through to normal processing
2274 SDNode *I = SelectIndexedLoad(Node, Done);
2283 if (SDNode *I = SelectBitfieldExtractOp(Node))
2288 if (SDNode *I = SelectBitfieldInsertOp(Node))
2292 case ISD::EXTRACT_VECTOR_ELT: {
2293 // Extracting lane zero is a special case where we can just use a plain
2294 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2295 // the rest of the compiler, especially the register allocator and copy
2296 // propagation, to reason about, so it is preferred whenever possible.
2298 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2299 // Bail and use the default Select() for non-zero lanes.
2300 if (LaneNode->getZExtValue() != 0)
2302 // If the element type is not the same as the result type, likewise
2303 // bail and use the default Select(), as there's more to do than just
2304 // a cross-class COPY. This catches extracts of i8 and i16 elements
2305 // since they will need an explicit zext.
2306 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2309 switch (Node->getOperand(0)
2311 .getVectorElementType()
2314 llvm_unreachable("Unexpected vector element type!");
2316 SubReg = AArch64::dsub;
2319 SubReg = AArch64::ssub;
2322 SubReg = AArch64::hsub;
2325 llvm_unreachable("unexpected zext-requiring extract element!");
2327 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2328 Node->getOperand(0));
2329 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2330 DEBUG(Extract->dumpr(CurDAG));
2331 DEBUG(dbgs() << "\n");
2332 return Extract.getNode();
2334 case ISD::Constant: {
2335 // Materialize zero constants as copies from WZR/XZR. This allows
2336 // the coalescer to propagate these into other instructions.
2337 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2338 if (ConstNode->isNullValue()) {
2340 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2341 AArch64::WZR, MVT::i32).getNode();
2342 else if (VT == MVT::i64)
2343 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2344 AArch64::XZR, MVT::i64).getNode();
2349 case ISD::FrameIndex: {
2350 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2351 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2352 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2353 const TargetLowering *TLI = getTargetLowering();
2354 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
2356 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
2357 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
2358 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
2360 case ISD::INTRINSIC_W_CHAIN: {
2361 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2365 case Intrinsic::aarch64_ldaxp:
2366 case Intrinsic::aarch64_ldxp: {
2368 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2369 SDValue MemAddr = Node->getOperand(2);
2371 SDValue Chain = Node->getOperand(0);
2373 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2374 MVT::Other, MemAddr, Chain);
2376 // Transfer memoperands.
2377 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2378 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2379 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2382 case Intrinsic::aarch64_stlxp:
2383 case Intrinsic::aarch64_stxp: {
2385 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2387 SDValue Chain = Node->getOperand(0);
2388 SDValue ValLo = Node->getOperand(2);
2389 SDValue ValHi = Node->getOperand(3);
2390 SDValue MemAddr = Node->getOperand(4);
2392 // Place arguments in the right order.
2393 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
2395 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2396 // Transfer memoperands.
2397 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2398 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2399 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2403 case Intrinsic::aarch64_neon_ld1x2:
2404 if (VT == MVT::v8i8)
2405 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2406 else if (VT == MVT::v16i8)
2407 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
2408 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2409 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
2410 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2411 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2412 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2413 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2414 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2415 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2416 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2417 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2418 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2419 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2421 case Intrinsic::aarch64_neon_ld1x3:
2422 if (VT == MVT::v8i8)
2423 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2424 else if (VT == MVT::v16i8)
2425 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
2426 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2427 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
2428 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2429 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2430 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2431 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2432 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2433 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2434 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2435 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2436 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2437 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2439 case Intrinsic::aarch64_neon_ld1x4:
2440 if (VT == MVT::v8i8)
2441 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2442 else if (VT == MVT::v16i8)
2443 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
2444 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2445 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
2446 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2447 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2448 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2449 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2450 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2451 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2452 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2453 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2454 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2455 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2457 case Intrinsic::aarch64_neon_ld2:
2458 if (VT == MVT::v8i8)
2459 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2460 else if (VT == MVT::v16i8)
2461 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
2462 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2463 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
2464 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2465 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2466 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2467 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2468 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2469 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2470 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2471 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2472 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2473 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2475 case Intrinsic::aarch64_neon_ld3:
2476 if (VT == MVT::v8i8)
2477 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2478 else if (VT == MVT::v16i8)
2479 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
2480 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2481 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
2482 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2483 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2484 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2485 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2486 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2487 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2488 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2489 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2490 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2491 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2493 case Intrinsic::aarch64_neon_ld4:
2494 if (VT == MVT::v8i8)
2495 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2496 else if (VT == MVT::v16i8)
2497 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
2498 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2499 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
2500 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2501 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2502 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2503 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2504 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2505 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2506 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2507 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2508 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2509 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2511 case Intrinsic::aarch64_neon_ld2r:
2512 if (VT == MVT::v8i8)
2513 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2514 else if (VT == MVT::v16i8)
2515 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
2516 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2517 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
2518 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2519 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2520 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2521 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2522 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2523 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2524 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2525 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2526 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2527 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2529 case Intrinsic::aarch64_neon_ld3r:
2530 if (VT == MVT::v8i8)
2531 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2532 else if (VT == MVT::v16i8)
2533 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
2534 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2535 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
2536 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2537 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2538 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2539 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2540 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2541 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2542 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2543 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2544 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2545 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2547 case Intrinsic::aarch64_neon_ld4r:
2548 if (VT == MVT::v8i8)
2549 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2550 else if (VT == MVT::v16i8)
2551 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
2552 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2553 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
2554 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2555 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2556 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2557 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2558 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2559 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2560 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2561 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2562 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2563 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2565 case Intrinsic::aarch64_neon_ld2lane:
2566 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2567 return SelectLoadLane(Node, 2, AArch64::LD2i8);
2568 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2570 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2571 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2573 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2574 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2576 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2578 case Intrinsic::aarch64_neon_ld3lane:
2579 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2580 return SelectLoadLane(Node, 3, AArch64::LD3i8);
2581 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2583 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2584 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2586 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2587 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2589 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2591 case Intrinsic::aarch64_neon_ld4lane:
2592 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2593 return SelectLoadLane(Node, 4, AArch64::LD4i8);
2594 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2596 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2597 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2599 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2600 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2602 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2606 case ISD::INTRINSIC_WO_CHAIN: {
2607 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
2611 case Intrinsic::aarch64_neon_tbl2:
2612 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
2613 : AArch64::TBLv16i8Two,
2615 case Intrinsic::aarch64_neon_tbl3:
2616 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
2617 : AArch64::TBLv16i8Three,
2619 case Intrinsic::aarch64_neon_tbl4:
2620 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
2621 : AArch64::TBLv16i8Four,
2623 case Intrinsic::aarch64_neon_tbx2:
2624 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
2625 : AArch64::TBXv16i8Two,
2627 case Intrinsic::aarch64_neon_tbx3:
2628 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
2629 : AArch64::TBXv16i8Three,
2631 case Intrinsic::aarch64_neon_tbx4:
2632 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
2633 : AArch64::TBXv16i8Four,
2635 case Intrinsic::aarch64_neon_smull:
2636 case Intrinsic::aarch64_neon_umull:
2637 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2643 case ISD::INTRINSIC_VOID: {
2644 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2645 if (Node->getNumOperands() >= 3)
2646 VT = Node->getOperand(2)->getValueType(0);
2650 case Intrinsic::aarch64_neon_st1x2: {
2651 if (VT == MVT::v8i8)
2652 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2653 else if (VT == MVT::v16i8)
2654 return SelectStore(Node, 2, AArch64::ST1Twov16b);
2655 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2656 return SelectStore(Node, 2, AArch64::ST1Twov4h);
2657 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2658 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2659 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2660 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2661 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2662 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2663 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2664 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2665 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2666 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2669 case Intrinsic::aarch64_neon_st1x3: {
2670 if (VT == MVT::v8i8)
2671 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2672 else if (VT == MVT::v16i8)
2673 return SelectStore(Node, 3, AArch64::ST1Threev16b);
2674 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2675 return SelectStore(Node, 3, AArch64::ST1Threev4h);
2676 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2677 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2678 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2679 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2680 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2681 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2682 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2683 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2684 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2685 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2688 case Intrinsic::aarch64_neon_st1x4: {
2689 if (VT == MVT::v8i8)
2690 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2691 else if (VT == MVT::v16i8)
2692 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
2693 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2694 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
2695 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2696 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2697 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2698 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2699 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2700 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2701 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2702 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2703 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2704 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2707 case Intrinsic::aarch64_neon_st2: {
2708 if (VT == MVT::v8i8)
2709 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2710 else if (VT == MVT::v16i8)
2711 return SelectStore(Node, 2, AArch64::ST2Twov16b);
2712 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2713 return SelectStore(Node, 2, AArch64::ST2Twov4h);
2714 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2715 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2716 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2717 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2718 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2719 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2720 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2721 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2722 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2723 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2726 case Intrinsic::aarch64_neon_st3: {
2727 if (VT == MVT::v8i8)
2728 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2729 else if (VT == MVT::v16i8)
2730 return SelectStore(Node, 3, AArch64::ST3Threev16b);
2731 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2732 return SelectStore(Node, 3, AArch64::ST3Threev4h);
2733 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2734 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2735 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2736 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2737 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2738 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2739 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2740 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2741 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2742 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2745 case Intrinsic::aarch64_neon_st4: {
2746 if (VT == MVT::v8i8)
2747 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2748 else if (VT == MVT::v16i8)
2749 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
2750 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2751 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
2752 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2753 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2754 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2755 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2756 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2757 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2758 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2759 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2760 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2761 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2764 case Intrinsic::aarch64_neon_st2lane: {
2765 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2766 return SelectStoreLane(Node, 2, AArch64::ST2i8);
2767 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2769 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2770 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2772 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2773 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2775 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2778 case Intrinsic::aarch64_neon_st3lane: {
2779 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2780 return SelectStoreLane(Node, 3, AArch64::ST3i8);
2781 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2783 return SelectStoreLane(Node, 3, AArch64::ST3i16);
2784 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2786 return SelectStoreLane(Node, 3, AArch64::ST3i32);
2787 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2789 return SelectStoreLane(Node, 3, AArch64::ST3i64);
2792 case Intrinsic::aarch64_neon_st4lane: {
2793 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2794 return SelectStoreLane(Node, 4, AArch64::ST4i8);
2795 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2797 return SelectStoreLane(Node, 4, AArch64::ST4i16);
2798 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2800 return SelectStoreLane(Node, 4, AArch64::ST4i32);
2801 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2803 return SelectStoreLane(Node, 4, AArch64::ST4i64);
2808 case AArch64ISD::LD2post: {
2809 if (VT == MVT::v8i8)
2810 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
2811 else if (VT == MVT::v16i8)
2812 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
2813 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2814 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
2815 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2816 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
2817 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2818 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
2819 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2820 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
2821 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2822 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2823 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2824 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
2827 case AArch64ISD::LD3post: {
2828 if (VT == MVT::v8i8)
2829 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
2830 else if (VT == MVT::v16i8)
2831 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
2832 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2833 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
2834 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2835 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
2836 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2837 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
2838 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2839 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
2840 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2841 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2842 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2843 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
2846 case AArch64ISD::LD4post: {
2847 if (VT == MVT::v8i8)
2848 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
2849 else if (VT == MVT::v16i8)
2850 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
2851 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2852 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
2853 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2854 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
2855 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2856 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
2857 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2858 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
2859 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2860 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2861 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2862 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
2865 case AArch64ISD::LD1x2post: {
2866 if (VT == MVT::v8i8)
2867 return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
2868 else if (VT == MVT::v16i8)
2869 return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
2870 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2871 return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
2872 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2873 return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
2874 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2875 return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
2876 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2877 return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
2878 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2879 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2880 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2881 return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
2884 case AArch64ISD::LD1x3post: {
2885 if (VT == MVT::v8i8)
2886 return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
2887 else if (VT == MVT::v16i8)
2888 return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
2889 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2890 return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
2891 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2892 return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
2893 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2894 return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
2895 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2896 return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
2897 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2898 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2899 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2900 return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
2903 case AArch64ISD::LD1x4post: {
2904 if (VT == MVT::v8i8)
2905 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
2906 else if (VT == MVT::v16i8)
2907 return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
2908 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2909 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
2910 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2911 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
2912 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2913 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
2914 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2915 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
2916 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2917 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2918 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2919 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
2922 case AArch64ISD::LD1DUPpost: {
2923 if (VT == MVT::v8i8)
2924 return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
2925 else if (VT == MVT::v16i8)
2926 return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
2927 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2928 return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
2929 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2930 return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
2931 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2932 return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
2933 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2934 return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
2935 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2936 return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
2937 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2938 return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
2941 case AArch64ISD::LD2DUPpost: {
2942 if (VT == MVT::v8i8)
2943 return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
2944 else if (VT == MVT::v16i8)
2945 return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
2946 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2947 return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
2948 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2949 return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
2950 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2951 return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
2952 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2953 return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
2954 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2955 return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
2956 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2957 return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
2960 case AArch64ISD::LD3DUPpost: {
2961 if (VT == MVT::v8i8)
2962 return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
2963 else if (VT == MVT::v16i8)
2964 return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
2965 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2966 return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
2967 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2968 return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
2969 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2970 return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
2971 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2972 return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
2973 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2974 return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
2975 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2976 return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
2979 case AArch64ISD::LD4DUPpost: {
2980 if (VT == MVT::v8i8)
2981 return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
2982 else if (VT == MVT::v16i8)
2983 return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
2984 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2985 return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
2986 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2987 return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
2988 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2989 return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
2990 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2991 return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
2992 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2993 return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
2994 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2995 return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
2998 case AArch64ISD::LD1LANEpost: {
2999 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3000 return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
3001 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3003 return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
3004 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3006 return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
3007 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3009 return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
3012 case AArch64ISD::LD2LANEpost: {
3013 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3014 return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
3015 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3017 return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
3018 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3020 return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
3021 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3023 return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
3026 case AArch64ISD::LD3LANEpost: {
3027 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3028 return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
3029 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3031 return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
3032 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3034 return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
3035 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3037 return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
3040 case AArch64ISD::LD4LANEpost: {
3041 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3042 return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
3043 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3045 return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
3046 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3048 return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
3049 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3051 return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
3054 case AArch64ISD::ST2post: {
3055 VT = Node->getOperand(1).getValueType();
3056 if (VT == MVT::v8i8)
3057 return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
3058 else if (VT == MVT::v16i8)
3059 return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
3060 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3061 return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
3062 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3063 return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
3064 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3065 return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
3066 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3067 return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
3068 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3069 return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
3070 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3071 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
3074 case AArch64ISD::ST3post: {
3075 VT = Node->getOperand(1).getValueType();
3076 if (VT == MVT::v8i8)
3077 return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
3078 else if (VT == MVT::v16i8)
3079 return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
3080 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3081 return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
3082 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3083 return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
3084 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3085 return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
3086 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3087 return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
3088 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3089 return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
3090 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3091 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
3094 case AArch64ISD::ST4post: {
3095 VT = Node->getOperand(1).getValueType();
3096 if (VT == MVT::v8i8)
3097 return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
3098 else if (VT == MVT::v16i8)
3099 return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
3100 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3101 return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
3102 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3103 return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
3104 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3105 return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
3106 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3107 return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
3108 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3109 return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
3110 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3111 return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
3114 case AArch64ISD::ST1x2post: {
3115 VT = Node->getOperand(1).getValueType();
3116 if (VT == MVT::v8i8)
3117 return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
3118 else if (VT == MVT::v16i8)
3119 return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
3120 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3121 return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
3122 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3123 return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
3124 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3125 return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
3126 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3127 return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
3128 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3129 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
3130 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3131 return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
3134 case AArch64ISD::ST1x3post: {
3135 VT = Node->getOperand(1).getValueType();
3136 if (VT == MVT::v8i8)
3137 return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
3138 else if (VT == MVT::v16i8)
3139 return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
3140 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3141 return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
3142 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3143 return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
3144 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3145 return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
3146 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3147 return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
3148 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3149 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
3150 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3151 return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
3154 case AArch64ISD::ST1x4post: {
3155 VT = Node->getOperand(1).getValueType();
3156 if (VT == MVT::v8i8)
3157 return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
3158 else if (VT == MVT::v16i8)
3159 return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
3160 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3161 return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
3162 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3163 return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
3164 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3165 return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
3166 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3167 return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
3168 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3169 return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
3170 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3171 return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
3174 case AArch64ISD::ST2LANEpost: {
3175 VT = Node->getOperand(1).getValueType();
3176 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3177 return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
3178 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3180 return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
3181 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3183 return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
3184 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3186 return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
3189 case AArch64ISD::ST3LANEpost: {
3190 VT = Node->getOperand(1).getValueType();
3191 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3192 return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
3193 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3195 return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
3196 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3198 return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
3199 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3201 return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
3204 case AArch64ISD::ST4LANEpost: {
3205 VT = Node->getOperand(1).getValueType();
3206 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3207 return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
3208 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3210 return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
3211 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3213 return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
3214 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3216 return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
3224 if (SDNode *I = SelectLIBM(Node))
3229 // Select the default instruction
3230 ResNode = SelectCode(Node);
3232 DEBUG(errs() << "=> ");
3233 if (ResNode == nullptr || ResNode == Node)
3234 DEBUG(Node->dump(CurDAG));
3236 DEBUG(ResNode->dump(CurDAG));
3237 DEBUG(errs() << "\n");
3242 /// createAArch64ISelDag - This pass converts a legalized DAG into an
3243 /// AArch64-specific DAG, ready for instruction scheduling.
3244 FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
3245 CodeGenOpt::Level OptLevel) {
3246 return new AArch64DAGToDAGISel(TM, OptLevel);