1 //===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the AArch64 target.
12 //===----------------------------------------------------------------------===//
14 #include "AArch64TargetMachine.h"
15 #include "MCTargetDesc/AArch64AddressingModes.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/CodeGen/SelectionDAGISel.h"
18 #include "llvm/IR/Function.h" // To access function attributes.
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
28 #define DEBUG_TYPE "aarch64-isel"
30 //===--------------------------------------------------------------------===//
31 /// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
32 /// instructions for SelectionDAG operations.
36 class AArch64DAGToDAGISel : public SelectionDAGISel {
37 AArch64TargetMachine &TM;
39 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
40 /// make the right decision when generating code for different targets.
41 const AArch64Subtarget *Subtarget;
46 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
47 CodeGenOpt::Level OptLevel)
48 : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
49 ForCodeSize(false) {}
51 const char *getPassName() const override {
52 return "AArch64 Instruction Selection";
55 bool runOnMachineFunction(MachineFunction &MF) override {
56 ForCodeSize =
57 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
58 MF.getFunction()->hasFnAttribute(Attribute::MinSize);
59 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
60 return SelectionDAGISel::runOnMachineFunction(MF);
63 SDNode *Select(SDNode *Node) override;
65 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
66 /// inline asm expressions.
67 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
68 unsigned ConstraintID,
69 std::vector<SDValue> &OutOps) override;
71 SDNode *SelectMLAV64LaneV128(SDNode *N);
72 SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
73 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
74 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
75 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
76 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
77 return SelectShiftedRegister(N, false, Reg, Shift);
79 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
80 return SelectShiftedRegister(N, true, Reg, Shift);
82 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
83 return SelectAddrModeIndexed(N, 1, Base, OffImm);
85 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
86 return SelectAddrModeIndexed(N, 2, Base, OffImm);
88 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
89 return SelectAddrModeIndexed(N, 4, Base, OffImm);
91 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
92 return SelectAddrModeIndexed(N, 8, Base, OffImm);
94 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
95 return SelectAddrModeIndexed(N, 16, Base, OffImm);
97 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
98 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
100 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
101 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
103 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
104 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
106 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
107 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
109 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
110 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
114 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
115 SDValue &SignExtend, SDValue &DoShift) {
116 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
120 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
121 SDValue &SignExtend, SDValue &DoShift) {
122 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
126 /// Form sequences of consecutive 64/128-bit registers for use in NEON
127 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
128 /// between 1 and 4 elements. If it contains a single element, that is returned
129 /// unchanged; otherwise a REG_SEQUENCE value is returned.
130 SDValue createDTuple(ArrayRef<SDValue> Vecs);
131 SDValue createQTuple(ArrayRef<SDValue> Vecs);
133 /// Generic helper for the createDTuple/createQTuple
134 /// functions. Those should almost always be called instead.
135 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
136 const unsigned SubRegs[]);
138 SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
140 SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
142 SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
144 SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
146 SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
147 SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
149 SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
150 SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
151 SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
152 SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
154 SDNode *SelectBitfieldExtractOp(SDNode *N);
155 SDNode *SelectBitfieldInsertOp(SDNode *N);
157 SDNode *SelectLIBM(SDNode *N);
159 // Include the pieces autogenerated from the target description.
160 #include "AArch64GenDAGISel.inc"
163 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
165 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
167 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
169 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
170 SDValue &Offset, SDValue &SignExtend,
172 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
173 SDValue &Offset, SDValue &SignExtend,
175 bool isWorthFolding(SDValue V) const;
176 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
177 SDValue &Offset, SDValue &SignExtend);
179 template<unsigned RegWidth>
180 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
181 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
184 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
186 } // end anonymous namespace
188 /// isIntImmediate - This method tests to see if the node is a constant
189 /// operand. If so, Imm will receive the value.
190 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
191 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
192 Imm = C->getZExtValue();
198 // isIntImmediate - This method tests to see if the operand is a constant.
199 // If so, Imm will receive the value.
200 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
201 return isIntImmediate(N.getNode(), Imm);
204 // isOpcWithIntImmediate - This method tests to see if the node has a specific
205 // opcode and an immediate integer right operand.
206 // If so, Imm will receive the value.
207 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
209 return N->getOpcode() == Opc &&
210 isIntImmediate(N->getOperand(1).getNode(), Imm);
213 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
214 const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
215 assert(ConstraintID == InlineAsm::Constraint_m &&
216 "unexpected asm memory constraint");
217 // Require the address to be in a register. That is safe for all AArch64
218 // variants and it is hard to do anything much smarter without knowing
219 // how the operand is used.
220 OutOps.push_back(Op);
224 /// SelectArithImmed - Select an immediate value that can be represented as
225 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
226 /// Val set to the 12-bit value and Shift set to the shifter operand.
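// Illustrative examples (not an exhaustive list) of what this pattern accepts:
//   #0xfff    -> Val = 0xfff, Shift = LSL #0
//   #0x123000 -> Val = 0x123, Shift = LSL #12
//   #0x1234   -> no match (neither form can represent it)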
227 bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
229 // This function is called from the addsub_shifted_imm ComplexPattern,
230 // which lists [imm] as the list of opcodes it's interested in; however,
231 // we still need to check whether the operand is actually an immediate
232 // here because the ComplexPattern opcode list is only used in
233 // root-level opcode matching.
234 if (!isa<ConstantSDNode>(N.getNode()))
237 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
240 if (Immed >> 12 == 0) {
242 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
248 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
249 Val = CurDAG->getTargetConstant(Immed, MVT::i32);
250 Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
254 /// SelectNegArithImmed - As above, but negates the value before trying to
256 bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
258 // This function is called from the addsub_shifted_imm ComplexPattern,
259 // which lists [imm] as the list of opcodes it's interested in; however,
260 // we still need to check whether the operand is actually an immediate
261 // here because the ComplexPattern opcode list is only used in
262 // root-level opcode matching.
263 if (!isa<ConstantSDNode>(N.getNode()))
266 // The immediate operand must be a 24-bit zero-extended immediate.
267 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
269 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
270 // have the opposite effect on the C flag, so this pattern mustn't match under
271 // those circumstances.
272 if (Immed == 0)
273 return false;
275 if (N.getValueType() == MVT::i32)
276 Immed = ~((uint32_t)Immed) + 1;
278 Immed = ~Immed + 1ULL;
279 if (Immed & 0xFFFFFFFFFF000000ULL)
282 Immed &= 0xFFFFFFULL;
283 return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
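// For illustration: on an i32 compare, an immediate of -1 (0xFFFFFFFF) negates
// to 1, so "cmp wN, #-1" can be selected as "cmn wN, #1". An immediate of 0 is
// rejected above because "cmp wN, #0" and "cmn wN, #0" set the C flag
// differently.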
286 /// getShiftTypeForNode - Translate a shift node to the corresponding
288 static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
289 switch (N.getOpcode()) {
291 return AArch64_AM::InvalidShiftExtend;
293 return AArch64_AM::LSL;
295 return AArch64_AM::LSR;
297 return AArch64_AM::ASR;
299 return AArch64_AM::ROR;
303 /// \brief Determine whether it is worth folding V into an extended register.
304 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
305 // It hurts if the value is used at least twice, unless we are optimizing
306 // for code size.
307 if (ForCodeSize || V.hasOneUse())
312 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
313 /// is not shifted, set the Shift operand to the default of "LSL 0". The logical
314 /// instructions allow the shifted register to be rotated, but the arithmetic
315 /// instructions do not. The AllowROR parameter specifies whether ROR is
316 /// allowed.
317 bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
318 SDValue &Reg, SDValue &Shift) {
319 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
320 if (ShType == AArch64_AM::InvalidShiftExtend)
322 if (!AllowROR && ShType == AArch64_AM::ROR)
325 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
326 unsigned BitSize = N.getValueType().getSizeInBits();
327 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
328 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
330 Reg = N.getOperand(0);
331 Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
332 return isWorthFolding(N);
338 /// getExtendTypeForNode - Translate an extend node to the corresponding
339 /// ExtendType value.
340 static AArch64_AM::ShiftExtendType
341 getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
342 if (N.getOpcode() == ISD::SIGN_EXTEND ||
343 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
345 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
346 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
348 SrcVT = N.getOperand(0).getValueType();
350 if (!IsLoadStore && SrcVT == MVT::i8)
351 return AArch64_AM::SXTB;
352 else if (!IsLoadStore && SrcVT == MVT::i16)
353 return AArch64_AM::SXTH;
354 else if (SrcVT == MVT::i32)
355 return AArch64_AM::SXTW;
356 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
358 return AArch64_AM::InvalidShiftExtend;
359 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
360 N.getOpcode() == ISD::ANY_EXTEND) {
361 EVT SrcVT = N.getOperand(0).getValueType();
362 if (!IsLoadStore && SrcVT == MVT::i8)
363 return AArch64_AM::UXTB;
364 else if (!IsLoadStore && SrcVT == MVT::i16)
365 return AArch64_AM::UXTH;
366 else if (SrcVT == MVT::i32)
367 return AArch64_AM::UXTW;
368 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
370 return AArch64_AM::InvalidShiftExtend;
371 } else if (N.getOpcode() == ISD::AND) {
372 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
374 return AArch64_AM::InvalidShiftExtend;
375 uint64_t AndMask = CSD->getZExtValue();
379 return AArch64_AM::InvalidShiftExtend;
381 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
383 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
385 return AArch64_AM::UXTW;
389 return AArch64_AM::InvalidShiftExtend;
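// In summary (illustrative): an AND with mask 0xFF maps to UXTB, 0xFFFF to
// UXTH, and 0xFFFFFFFF to UXTW; any other mask gives InvalidShiftExtend. For
// loads/stores the byte/halfword forms are rejected because register-offset
// addressing does not provide byte/halfword extends.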
392 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
393 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
394 if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
395 DL->getOpcode() != AArch64ISD::DUPLANE32)
398 SDValue SV = DL->getOperand(0);
399 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
402 SDValue EV = SV.getOperand(1);
403 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
406 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
407 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
408 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
409 LaneOp = EV.getOperand(0);
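// Rough worked example: if EV extracts the high v4i16 half of a v8i16
// (EVidx == 4) and the DUPLANE refers to lane 2 of that half (DLidx == 2),
// the reconstructed LaneIdx is 6, i.e. lane 6 of the original 128-bit vector.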
414 // Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
415 // high lane extract.
416 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
417 SDValue &LaneOp, int &LaneIdx) {
419 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
420 std::swap(Op0, Op1);
421 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
428 /// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
429 /// is a lane in the upper half of a 128-bit vector. Recognize and select this
430 /// so that we don't emit unnecessary lane extracts.
431 SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
432 SDValue Op0 = N->getOperand(0);
433 SDValue Op1 = N->getOperand(1);
434 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
435 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
436 int LaneIdx = -1; // Will hold the lane index.
438 if (Op1.getOpcode() != ISD::MUL ||
439 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
440 LaneIdx)) {
441 std::swap(Op0, Op1);
442 if (Op1.getOpcode() != ISD::MUL ||
443 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
448 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
450 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
452 unsigned MLAOpc = ~0U;
454 switch (N->getSimpleValueType(0).SimpleTy) {
456 llvm_unreachable("Unrecognized MLA.");
458 MLAOpc = AArch64::MLAv4i16_indexed;
461 MLAOpc = AArch64::MLAv8i16_indexed;
464 MLAOpc = AArch64::MLAv2i32_indexed;
467 MLAOpc = AArch64::MLAv4i32_indexed;
471 return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
474 SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
479 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
483 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
485 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
487 unsigned SMULLOpc = ~0U;
489 if (IntNo == Intrinsic::aarch64_neon_smull) {
490 switch (N->getSimpleValueType(0).SimpleTy) {
492 llvm_unreachable("Unrecognized SMULL.");
494 SMULLOpc = AArch64::SMULLv4i16_indexed;
497 SMULLOpc = AArch64::SMULLv2i32_indexed;
500 } else if (IntNo == Intrinsic::aarch64_neon_umull) {
501 switch (N->getSimpleValueType(0).SimpleTy) {
503 llvm_unreachable("Unrecognized SMULL.");
505 SMULLOpc = AArch64::UMULLv4i16_indexed;
508 SMULLOpc = AArch64::UMULLv2i32_indexed;
512 llvm_unreachable("Unrecognized intrinsic.");
514 return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
517 /// Instructions that accept extend modifiers like UXTW expect the register
518 /// being extended to be a GPR32, but the incoming DAG might be acting on a
519 /// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
520 /// this is the case.
521 static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
522 if (N.getValueType() == MVT::i32)
525 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
526 MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
527 SDLoc(N), MVT::i32, N, SubReg);
528 return SDValue(Node, 0);
532 /// SelectArithExtendedRegister - Select an "extended register" operand. This
533 /// operand folds in an extend followed by an optional left shift.
534 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
536 unsigned ShiftVal = 0;
537 AArch64_AM::ShiftExtendType Ext;
539 if (N.getOpcode() == ISD::SHL) {
540 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
543 ShiftVal = CSD->getZExtValue();
547 Ext = getExtendTypeForNode(N.getOperand(0));
548 if (Ext == AArch64_AM::InvalidShiftExtend)
551 Reg = N.getOperand(0).getOperand(0);
553 Ext = getExtendTypeForNode(N);
554 if (Ext == AArch64_AM::InvalidShiftExtend)
557 Reg = N.getOperand(0);
560 // AArch64 mandates that the RHS of the operation must use the smallest
561 // register class that could contain the size being extended from. Thus,
562 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
563 // there might not be an actual 32-bit value in the program. We can
564 // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
565 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
566 Reg = narrowIfNeeded(CurDAG, Reg);
567 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
568 return isWorthFolding(N);
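// For example (illustrative): (add x0, (shl (zext i32 w1 to i64), 2)) can take
// w1 as the extended register with Shift = UXTW #2, selecting to
// "ADD x0, x0, w1, UXTW #2"; shift amounts greater than 4 are not folded.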
571 /// If there's a use of this ADDlow that's not itself a load/store, then we'll
572 /// need to create a real ADD instruction from it anyway and there's no point in
573 /// folding it into the mem op. Theoretically, it shouldn't matter, but there's
574 /// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
575 /// leads to duplicated ADRP instructions.
576 static bool isWorthFoldingADDlow(SDValue N) {
577 for (auto Use : N->uses()) {
578 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
579 Use->getOpcode() != ISD::ATOMIC_LOAD &&
580 Use->getOpcode() != ISD::ATOMIC_STORE)
583 // ldar and stlr have much more restrictive addressing modes (just a
584 // register).
585 if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
592 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
593 /// immediate" address. The "Size" argument is the size in bytes of the memory
594 /// reference, which determines the scale.
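/// For example (illustrative): with Size == 8, the offset must be a
/// non-negative multiple of 8 below 0x1000 * 8, and it is emitted scaled, so a
/// base + 16 address yields OffImm = 2 (as in "ldr x0, [xBase, #16]").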
595 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
596 SDValue &Base, SDValue &OffImm) {
597 const TargetLowering *TLI = getTargetLowering();
598 if (N.getOpcode() == ISD::FrameIndex) {
599 int FI = cast<FrameIndexSDNode>(N)->getIndex();
600 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
601 OffImm = CurDAG->getTargetConstant(0, MVT::i64);
605 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
606 GlobalAddressSDNode *GAN =
607 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
608 Base = N.getOperand(0);
609 OffImm = N.getOperand(1);
613 const GlobalValue *GV = GAN->getGlobal();
614 unsigned Alignment = GV->getAlignment();
615 const DataLayout *DL = TLI->getDataLayout();
616 Type *Ty = GV->getType()->getElementType();
617 if (Alignment == 0 && Ty->isSized())
618 Alignment = DL->getABITypeAlignment(Ty);
620 if (Alignment >= Size)
624 if (CurDAG->isBaseWithConstantOffset(N)) {
625 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
626 int64_t RHSC = (int64_t)RHS->getZExtValue();
627 unsigned Scale = Log2_32(Size);
628 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
629 Base = N.getOperand(0);
630 if (Base.getOpcode() == ISD::FrameIndex) {
631 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
632 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
634 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
640 // Before falling back to our general case, check if the unscaled
641 // instructions can handle this. If so, that's preferable.
642 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
645 // Base only. The address will be materialized into a register before
646 // the memory is accessed.
647 // add x0, Xbase, #offset
650 OffImm = CurDAG->getTargetConstant(0, MVT::i64);
654 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
655 /// immediate" address. This should only match when there is an offset that
656 /// is not valid for a scaled immediate addressing mode. The "Size" argument
657 /// is the size in bytes of the memory reference, which is needed here to know
658 /// what is valid for a scaled immediate.
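/// For example (illustrative): an 8-byte access at base - 8 cannot use the
/// scaled form (the offset is negative), but -8 lies in [-256, 255], so this
/// matches and typically selects an LDUR/STUR-style instruction.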
659 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
662 if (!CurDAG->isBaseWithConstantOffset(N))
664 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
665 int64_t RHSC = RHS->getSExtValue();
666 // If the offset is valid as a scaled immediate, don't match here.
667 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
668 RHSC < (0x1000 << Log2_32(Size)))
670 if (RHSC >= -256 && RHSC < 256) {
671 Base = N.getOperand(0);
672 if (Base.getOpcode() == ISD::FrameIndex) {
673 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
674 const TargetLowering *TLI = getTargetLowering();
675 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
677 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
684 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
685 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
686 SDValue ImpDef = SDValue(
687 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
689 MachineSDNode *Node = CurDAG->getMachineNode(
690 TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
691 return SDValue(Node, 0);
694 /// \brief Check if the given SHL node (\p N), can be used to form an
695 /// extended register for an addressing mode.
696 bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
697 bool WantExtend, SDValue &Offset,
698 SDValue &SignExtend) {
699 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
700 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
701 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
705 AArch64_AM::ShiftExtendType Ext =
706 getExtendTypeForNode(N.getOperand(0), true);
707 if (Ext == AArch64_AM::InvalidShiftExtend)
710 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
711 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
713 Offset = N.getOperand(0);
714 SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
717 unsigned LegalShiftVal = Log2_32(Size);
718 unsigned ShiftVal = CSD->getZExtValue();
720 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
723 if (isWorthFolding(N))
729 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
730 SDValue &Base, SDValue &Offset,
733 if (N.getOpcode() != ISD::ADD)
735 SDValue LHS = N.getOperand(0);
736 SDValue RHS = N.getOperand(1);
738 // We don't want to match immediate adds here, because they are better lowered
739 // to the register-immediate addressing modes.
740 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
743 // Check if this particular node is reused in any non-memory related
744 // operation. If yes, do not try to fold this node into the address
745 // computation, since the computation will be kept.
746 const SDNode *Node = N.getNode();
747 for (SDNode *UI : Node->uses()) {
748 if (!isa<MemSDNode>(*UI))
752 // Remember if it is worth folding N when it produces extended register.
753 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
755 // Try to match a shifted extend on the RHS.
756 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
757 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
759 DoShift = CurDAG->getTargetConstant(true, MVT::i32);
763 // Try to match a shifted extend on the LHS.
764 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
765 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
767 DoShift = CurDAG->getTargetConstant(true, MVT::i32);
771 // There was no shift, whatever else we find.
772 DoShift = CurDAG->getTargetConstant(false, MVT::i32);
774 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
775 // Try to match an unshifted extend on the LHS.
776 if (IsExtendedRegisterWorthFolding &&
777 (Ext = getExtendTypeForNode(LHS, true)) !=
778 AArch64_AM::InvalidShiftExtend) {
780 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
781 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
782 if (isWorthFolding(LHS))
786 // Try to match an unshifted extend on the RHS.
787 if (IsExtendedRegisterWorthFolding &&
788 (Ext = getExtendTypeForNode(RHS, true)) !=
789 AArch64_AM::InvalidShiftExtend) {
791 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
792 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, MVT::i32);
793 if (isWorthFolding(RHS))
800 // Check if the given immediate is preferred by ADD. If an immediate can be
801 // encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
802 // encoded by one MOVZ, return true.
803 static bool isPreferredADD(int64_t ImmOff) {
804 // Constant in [0x0, 0xfff] can be encoded in ADD.
805 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
807 // Check if it can be encoded in an "ADD LSL #12".
808 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
809 // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
810 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
811 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
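// Illustrative cases (given the masks above): 0xabc is preferred (plain ADD);
// 0x123000 is preferred ("ADD ... LSL #12" and not a single MOVZ); 0xab0000 is
// not preferred, since "MOVZ #0xab, LSL #16" covers it on its own.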
815 bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
816 SDValue &Base, SDValue &Offset,
819 if (N.getOpcode() != ISD::ADD)
821 SDValue LHS = N.getOperand(0);
822 SDValue RHS = N.getOperand(1);
824 // Check if this particular node is reused in any non-memory related
825 // operation. If yes, do not try to fold this node into the address
826 // computation, since the computation will be kept.
827 const SDNode *Node = N.getNode();
828 for (SDNode *UI : Node->uses()) {
829 if (!isa<MemSDNode>(*UI))
833 // Watch out if RHS is a wide immediate: it cannot be selected into the
834 // [BaseReg+Imm] addressing mode, and it may not be encodable in an
835 // ADD/SUB. In that case [BaseReg + 0] addressing is used and we generate
836 // instructions like:
837 // MOV X0, WideImmediate
838 // ADD X1, BaseReg, X0
839 // LDR X2, [X1, 0]
840 // For such a situation, using the [BaseReg, XReg] addressing mode can save one
841 // ADD/SUB:
842 // MOV X0, WideImmediate
843 // LDR X2, [BaseReg, X0]
844 if (isa<ConstantSDNode>(RHS)) {
845 int64_t ImmOff = (int64_t)dyn_cast<ConstantSDNode>(RHS)->getZExtValue();
846 unsigned Scale = Log2_32(Size);
847 // Skip the immediate if it can be selected by the load/store addressing mode.
848 // Also skip it if it can be encoded by a single ADD (SUB is also
849 // checked by using -ImmOff).
850 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
851 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
854 SDLoc DL(N.getNode());
855 SDValue Ops[] = { RHS };
857 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
858 SDValue MOVIV = SDValue(MOVI, 0);
859 // This ADD of two X registers will be selected into [Reg+Reg] mode.
860 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
863 // Remember if it is worth folding N when it produces extended register.
864 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
866 // Try to match a shifted extend on the RHS.
867 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
868 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
870 DoShift = CurDAG->getTargetConstant(true, MVT::i32);
874 // Try to match a shifted extend on the LHS.
875 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
876 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
878 DoShift = CurDAG->getTargetConstant(true, MVT::i32);
882 // Match any non-shifted, non-extend, non-immediate add expression.
883 Base = LHS;
884 Offset = RHS;
885 SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
886 DoShift = CurDAG->getTargetConstant(false, MVT::i32);
887 // Reg1 + Reg2 is free: no check needed.
891 SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
892 static const unsigned RegClassIDs[] = {
893 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
894 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
895 AArch64::dsub2, AArch64::dsub3};
897 return createTuple(Regs, RegClassIDs, SubRegs);
900 SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
901 static const unsigned RegClassIDs[] = {
902 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
903 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
904 AArch64::qsub2, AArch64::qsub3};
906 return createTuple(Regs, RegClassIDs, SubRegs);
909 SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
910 const unsigned RegClassIDs[],
911 const unsigned SubRegs[]) {
912 // There's no special register-class for a vector-list of 1 element: it's just
913 // a vector.
914 if (Regs.size() == 1)
917 assert(Regs.size() >= 2 && Regs.size() <= 4);
919 SDLoc DL(Regs[0].getNode());
921 SmallVector<SDValue, 4> Ops;
923 // First operand of REG_SEQUENCE is the desired RegClass.
925 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));
927 // Then we get pairs of source & subregister-position for the components.
928 for (unsigned i = 0; i < Regs.size(); ++i) {
929 Ops.push_back(Regs[i]);
930 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
934 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
935 return SDValue(N, 0);
938 SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
939 unsigned Opc, bool isExt) {
941 EVT VT = N->getValueType(0);
943 unsigned ExtOff = isExt;
945 // Form a REG_SEQUENCE to force register allocation.
946 unsigned Vec0Off = ExtOff + 1;
947 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
948 N->op_begin() + Vec0Off + NumVecs);
949 SDValue RegSeq = createQTuple(Regs);
951 SmallVector<SDValue, 6> Ops;
953 Ops.push_back(N->getOperand(1));
954 Ops.push_back(RegSeq);
955 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
956 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
959 SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
960 LoadSDNode *LD = cast<LoadSDNode>(N);
961 if (LD->isUnindexed())
963 EVT VT = LD->getMemoryVT();
964 EVT DstVT = N->getValueType(0);
965 ISD::MemIndexedMode AM = LD->getAddressingMode();
966 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
968 // We're not doing validity checking here. That was done when checking
969 // if we should mark the load as indexed or not. We're just selecting
970 // the right instruction.
973 ISD::LoadExtType ExtType = LD->getExtensionType();
974 bool InsertTo64 = false;
976 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
977 else if (VT == MVT::i32) {
978 if (ExtType == ISD::NON_EXTLOAD)
979 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
980 else if (ExtType == ISD::SEXTLOAD)
981 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
983 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
985 // The result of the load is only i32. It's the subreg_to_reg that makes
989 } else if (VT == MVT::i16) {
990 if (ExtType == ISD::SEXTLOAD) {
991 if (DstVT == MVT::i64)
992 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
994 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
996 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
997 InsertTo64 = DstVT == MVT::i64;
998 // The result of the load is only i32. It's the subreg_to_reg that makes
1002 } else if (VT == MVT::i8) {
1003 if (ExtType == ISD::SEXTLOAD) {
1004 if (DstVT == MVT::i64)
1005 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1007 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1009 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1010 InsertTo64 = DstVT == MVT::i64;
1011 // The result of the load is only i32. It's the subreg_to_reg that makes
1015 } else if (VT == MVT::f32) {
1016 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1017 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1018 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1019 } else if (VT.is128BitVector()) {
1020 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1023 SDValue Chain = LD->getChain();
1024 SDValue Base = LD->getBasePtr();
1025 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1026 int OffsetVal = (int)OffsetOp->getZExtValue();
1027 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
1028 SDValue Ops[] = { Base, Offset, Chain };
1029 SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
1031 // Either way, we're replacing the node, so tell the caller that.
1033 SDValue LoadedVal = SDValue(Res, 1);
1035 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
1037 SDValue(CurDAG->getMachineNode(
1038 AArch64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
1039 CurDAG->getTargetConstant(0, MVT::i64), LoadedVal, SubReg),
1043 ReplaceUses(SDValue(N, 0), LoadedVal);
1044 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1045 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1050 SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
1051 unsigned Opc, unsigned SubRegIdx) {
1053 EVT VT = N->getValueType(0);
1054 SDValue Chain = N->getOperand(0);
1056 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1059 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1061 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1062 SDValue SuperReg = SDValue(Ld, 0);
1063 for (unsigned i = 0; i < NumVecs; ++i)
1064 ReplaceUses(SDValue(N, i),
1065 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1067 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1071 SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1072 unsigned Opc, unsigned SubRegIdx) {
1074 EVT VT = N->getValueType(0);
1075 SDValue Chain = N->getOperand(0);
1077 SDValue Ops[] = {N->getOperand(1), // Mem operand
1078 N->getOperand(2), // Incremental
1081 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1082 MVT::Untyped, MVT::Other};
1084 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1086 // Update uses of write back register
1087 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1089 // Update uses of vector list
1090 SDValue SuperReg = SDValue(Ld, 1);
1092 ReplaceUses(SDValue(N, 0), SuperReg);
1094 for (unsigned i = 0; i < NumVecs; ++i)
1095 ReplaceUses(SDValue(N, i),
1096 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1099 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1103 SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1106 EVT VT = N->getOperand(2)->getValueType(0);
1108 // Form a REG_SEQUENCE to force register allocation.
1109 bool Is128Bit = VT.getSizeInBits() == 128;
1110 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1111 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1113 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
1114 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1119 SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1122 EVT VT = N->getOperand(2)->getValueType(0);
1123 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1124 MVT::Other}; // Type for the Chain
1126 // Form a REG_SEQUENCE to force register allocation.
1127 bool Is128Bit = VT.getSizeInBits() == 128;
1128 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1129 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1131 SDValue Ops[] = {RegSeq,
1132 N->getOperand(NumVecs + 1), // base register
1133 N->getOperand(NumVecs + 2), // Incremental
1134 N->getOperand(0)}; // Chain
1135 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1140 /// WidenVector - Given a value in the V64 register class, produce the
1141 /// equivalent value in the V128 register class.
1146 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1148 SDValue operator()(SDValue V64Reg) {
1149 EVT VT = V64Reg.getValueType();
1150 unsigned NarrowSize = VT.getVectorNumElements();
1151 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1152 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1156 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1157 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1161 /// NarrowVector - Given a value in the V128 register class, produce the
1162 /// equivalent value in the V64 register class.
1163 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1164 EVT VT = V128Reg.getValueType();
1165 unsigned WideSize = VT.getVectorNumElements();
1166 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1167 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1169 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1173 SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1176 EVT VT = N->getValueType(0);
1177 bool Narrow = VT.getSizeInBits() == 64;
1179 // Form a REG_SEQUENCE to force register allocation.
1180 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1183 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1184 WidenVector(*CurDAG));
1186 SDValue RegSeq = createQTuple(Regs);
1188 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1191 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1193 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
1194 N->getOperand(NumVecs + 3), N->getOperand(0)};
1195 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1196 SDValue SuperReg = SDValue(Ld, 0);
1198 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1199 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1201 for (unsigned i = 0; i < NumVecs; ++i) {
1202 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1204 NV = NarrowVector(NV, *CurDAG);
1205 ReplaceUses(SDValue(N, i), NV);
1208 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1213 SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1216 EVT VT = N->getValueType(0);
1217 bool Narrow = VT.getSizeInBits() == 64;
1219 // Form a REG_SEQUENCE to force register allocation.
1220 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1223 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1224 WidenVector(*CurDAG));
1226 SDValue RegSeq = createQTuple(Regs);
1228 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1229 MVT::Untyped, MVT::Other};
1232 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1234 SDValue Ops[] = {RegSeq,
1235 CurDAG->getTargetConstant(LaneNo, MVT::i64), // Lane Number
1236 N->getOperand(NumVecs + 2), // Base register
1237 N->getOperand(NumVecs + 3), // Incremental
1239 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1241 // Update uses of the write back register
1242 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1244 // Update uses of the vector list
1245 SDValue SuperReg = SDValue(Ld, 1);
1247 ReplaceUses(SDValue(N, 0),
1248 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1250 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1251 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1253 for (unsigned i = 0; i < NumVecs; ++i) {
1254 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1257 NV = NarrowVector(NV, *CurDAG);
1258 ReplaceUses(SDValue(N, i), NV);
1263 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1268 SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1271 EVT VT = N->getOperand(2)->getValueType(0);
1272 bool Narrow = VT.getSizeInBits() == 64;
1274 // Form a REG_SEQUENCE to force register allocation.
1275 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1278 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1279 WidenVector(*CurDAG));
1281 SDValue RegSeq = createQTuple(Regs);
1284 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1286 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
1287 N->getOperand(NumVecs + 3), N->getOperand(0)};
1288 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1290 // Transfer memoperands.
1291 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1292 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1293 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1298 SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1301 EVT VT = N->getOperand(2)->getValueType(0);
1302 bool Narrow = VT.getSizeInBits() == 64;
1304 // Form a REG_SEQUENCE to force register allocation.
1305 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1308 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1309 WidenVector(*CurDAG));
1311 SDValue RegSeq = createQTuple(Regs);
1313 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1317 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1319 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, MVT::i64),
1320 N->getOperand(NumVecs + 2), // Base Register
1321 N->getOperand(NumVecs + 3), // Incremental
1323 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1325 // Transfer memoperands.
1326 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1327 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1328 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1333 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1334 unsigned &Opc, SDValue &Opd0,
1335 unsigned &LSB, unsigned &MSB,
1336 unsigned NumberOfIgnoredLowBits,
1337 bool BiggerPattern) {
1338 assert(N->getOpcode() == ISD::AND &&
1339 "N must be a AND operation to call this function");
1341 EVT VT = N->getValueType(0);
1343 // Here we can test the type of VT and return false when the type does not
1344 // match, but since it is done prior to that call in the current context
1345 // we turned that into an assert to avoid redundant code.
1346 assert((VT == MVT::i32 || VT == MVT::i64) &&
1347 "Type checking must have been done before calling this function");
1349 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1350 // changed the AND node to a 32-bit mask operation. We'll have to
1351 // undo that as part of the transform here if we want to catch all
1352 // the opportunities.
1353 // Currently the NumberOfIgnoredLowBits argument helps to recover
1354 // from these situations when matching a bigger pattern (bitfield insert).
1356 // For unsigned extracts, check for a shift right and mask
1357 uint64_t And_imm = 0;
1358 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1361 const SDNode *Op0 = N->getOperand(0).getNode();
1363 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1364 // simplified. Try to undo that
1365 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1367 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1368 if (And_imm & (And_imm + 1))
1371 bool ClampMSB = false;
1372 uint64_t Srl_imm = 0;
1373 // Handle the SRL + ANY_EXTEND case.
1374 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1375 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1376 // Extend the incoming operand of the SRL to 64-bit.
1377 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1378 // Make sure to clamp the MSB so that we preserve the semantics of the
1379 // original operations.
1381 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1382 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1384 // If the shift result was truncated, we can still combine them.
1385 Opd0 = Op0->getOperand(0).getOperand(0);
1387 // Use the type of SRL node.
1388 VT = Opd0->getValueType(0);
1389 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1390 Opd0 = Op0->getOperand(0);
1391 } else if (BiggerPattern) {
1392 // Let's pretend a 0 shift right has been performed.
1393 // The resulting code will be at least as good as the original one
1394 // plus it may expose more opportunities for bitfield insert pattern.
1395 // FIXME: Currently we limit this to the bigger pattern, because
1396 // some optimizations expect AND and not UBFM
1397 Opd0 = N->getOperand(0);
1401 // Bail out on large immediates. This happens when no proper
1402 // combining/constant folding was performed.
1403 if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
1404 DEBUG((dbgs() << N
1405 << ": Found large shift immediate, this should not happen\n"));
1410 MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
1411 : countTrailingOnes<uint64_t>(And_imm)) -
1414 // Since we're moving the extend before the right shift operation, we need
1415 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1416 // the zeros which would get shifted in with the original right shift
1417 // operation.
1418 MSB = MSB > 31 ? 31 : MSB;
1420 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
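// Worked example (illustrative): (and (srl x, 3), 0x1f) on i64 gives LSB = 3
// and MSB = 3 + 5 - 1 = 7, i.e. UBFMXri x, #3, #7, which extracts the 5-bit
// field starting at bit 3.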
1424 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1425 SDValue &Opd0, unsigned &LSB,
1427 // We are looking for the following pattern which basically extracts several
1428 // contiguous bits from the source value and places them at the LSB of the
1429 // destination value; all other bits of the destination value are set to zero:
1430 //
1431 // Value2 = AND Value, MaskImm
1432 // SRL Value2, ShiftImm
1433 //
1434 // with MaskImm >> ShiftImm giving the width of the field to extract.
1435 //
1436 // This gets selected into a single UBFM:
1437 //
1438 // UBFM Value, ShiftImm, BitWide + Srl_imm - 1
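// Worked example (illustrative): (srl (and x, 0xff0), 4) has
// MaskImm >> ShiftImm == 0xff, so BitWide == 8 and the node selects to
// UBFM x, #4, #11, extracting bits [4, 11] of x.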
1441 if (N->getOpcode() != ISD::SRL)
1444 uint64_t And_mask = 0;
1445 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1448 Opd0 = N->getOperand(0).getOperand(0);
1450 uint64_t Srl_imm = 0;
1451 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1454 // Check whether we really have a multi-bit field extract here.
1455 unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
1456 if (BitWide && isMask_64(And_mask >> Srl_imm)) {
1457 if (N->getValueType(0) == MVT::i32)
1458 Opc = AArch64::UBFMWri;
1460 Opc = AArch64::UBFMXri;
1463 MSB = BitWide + Srl_imm - 1;
1470 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1471 unsigned &LSB, unsigned &MSB,
1472 bool BiggerPattern) {
1473 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1474 "N must be a SHR/SRA operation to call this function");
1476 EVT VT = N->getValueType(0);
1478 // Here we can test the type of VT and return false when the type does not
1479 // match, but since it is done prior to that call in the current context
1480 // we turned that into an assert to avoid redundant code.
1481 assert((VT == MVT::i32 || VT == MVT::i64) &&
1482 "Type checking must have been done before calling this function");
1484 // Check for AND + SRL doing several bits extract.
1485 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
1488 // we're looking for a shift of a shift
1489 uint64_t Shl_imm = 0;
1490 uint64_t Trunc_bits = 0;
1491 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1492 Opd0 = N->getOperand(0).getOperand(0);
1493 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1494 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1495 // We are looking for a shift of a truncate. A truncate from i64 to i32 can
1496 // be considered as setting the high 32 bits to zero. Our strategy here is to
1497 // always generate a 64-bit UBFM. This consistency will help the CSE pass
1498 // later find more redundancy.
1499 Opd0 = N->getOperand(0).getOperand(0);
1500 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1501 VT = Opd0->getValueType(0);
1502 assert(VT == MVT::i64 && "the promoted type should be i64");
1503 } else if (BiggerPattern) {
1504 // Let's pretend a 0 shift left has been performed.
1505 // FIXME: Currently we limit this to the bigger pattern case,
1506 // because some optimizations expect AND and not UBFM
1507 Opd0 = N->getOperand(0);
1511 // Missing combines/constant folding may have left us with strange
1512 // constants.
1513 if (Shl_imm >= VT.getSizeInBits()) {
1514 DEBUG((dbgs() << N
1515 << ": Found large shift immediate, this should not happen\n"));
1519 uint64_t Srl_imm = 0;
1520 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1523 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1524 "bad amount in shift node!");
1525 // Note: The width operand is encoded as width-1.
1526 unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
1527 int sLSB = Srl_imm - Shl_imm;
1532 // SRA requires a signed extraction
1534 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1536 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
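// Worked example (illustrative): for (srl (shl x, 24), 28) on i32, the encoded
// width is 32 - 28 - 1 = 3 and sLSB = 28 - 24 = 4, so we emit
// UBFMWri x, #4, #7 (SBFMWri for the SRA case), extracting bits [4, 7].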
1540 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1541 SDValue &Opd0, unsigned &LSB, unsigned &MSB,
1542 unsigned NumberOfIgnoredLowBits = 0,
1543 bool BiggerPattern = false) {
1544 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1547 switch (N->getOpcode()) {
1549 if (!N->isMachineOpcode())
1553 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
1554 NumberOfIgnoredLowBits, BiggerPattern);
1557 return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
1560 unsigned NOpc = N->getMachineOpcode();
1564 case AArch64::SBFMWri:
1565 case AArch64::UBFMWri:
1566 case AArch64::SBFMXri:
1567 case AArch64::UBFMXri:
1569 Opd0 = N->getOperand(0);
1570 LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1571 MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1578 SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1579 unsigned Opc, LSB, MSB;
1581 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
1584 EVT VT = N->getValueType(0);
1586 // If the bit extract operation is 64-bit but the original type is 32-bit, we
1587 // need to add an EXTRACT_SUBREG.
1588 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
1589 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
1590 CurDAG->getTargetConstant(MSB, MVT::i64)};
1592 SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
1593 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32);
1594 MachineSDNode *Node =
1595 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
1596 SDValue(BFM, 0), SubReg);
1600 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
1601 CurDAG->getTargetConstant(MSB, VT)};
1602 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1605 /// Does DstMask form a complementary pair with the mask provided by
1606 /// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
1607 /// this asks whether DstMask zeroes precisely those bits that will be set by
1608 /// the other mask.
1609 static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1610 unsigned NumberOfIgnoredHighBits, EVT VT) {
1611 assert((VT == MVT::i32 || VT == MVT::i64) &&
1612 "i32 or i64 mask type expected!");
1613 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1615 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1616 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1618 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1619 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
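// For example (illustrative): on i32 with no ignored high bits, a destination
// mask of 0xffff0000 is complementary to inserted bits occupying 0x0000ffff:
// the masks do not overlap and together cover all 32 bits, so a BFI can be used.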
1622 // Look for bits that will be useful for later uses.
1623 // A bit is considered useless as soon as it is dropped, provided it was
1624 // never used before being dropped.
1625 // E.g., looking for the useful bits of x:
1626 // 1. y = x & 0x7
1627 // 2. z = y >> 2
1628 // After #1, the useful bits of x are 0x7; these useful bits of x live through
1629 // y.
1630 // After #2, the useful bits of x are 0x4.
1631 // However, if x is used by an unpredictable instruction, then all of its bits
1632 // are useful.
1637 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1639 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1642 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1643 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1644 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1645 getUsefulBits(Op, UsefulBits, Depth + 1);
1648 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1649 uint64_t Imm, uint64_t MSB,
1651 // inherit the bitwidth value
1652 APInt OpUsefulBits(UsefulBits);
1656 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1658 // The interesting part will be in the lower part of the result
1659 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1660 // The interesting part was starting at Imm in the argument
1661 OpUsefulBits = OpUsefulBits.shl(Imm);
1663 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1665 // The interesting part will be shifted in the result
1666 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1667 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1668 // The interesting part was at zero in the argument
1669 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1672 UsefulBits &= OpUsefulBits;
1675 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1678 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1680 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1682 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1685 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1687 uint64_t ShiftTypeAndValue =
1688 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1689 APInt Mask(UsefulBits);
1690 Mask.clearAllBits();
1693 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1695 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1696 Mask = Mask.shl(ShiftAmt);
1697 getUsefulBits(Op, Mask, Depth + 1);
1698 Mask = Mask.lshr(ShiftAmt);
1699 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1701 // We do not handle AArch64_AM::ASR, because the sign will change the
1702 // number of useful bits
1703 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1704 Mask = Mask.lshr(ShiftAmt);
1705 getUsefulBits(Op, Mask, Depth + 1);
1706 Mask = Mask.shl(ShiftAmt);
1713 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1716 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1718 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1720 if (Op.getOperand(1) == Orig)
1721 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1723 APInt OpUsefulBits(UsefulBits);
1727 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1729 UsefulBits &= ~OpUsefulBits;
1730 getUsefulBits(Op, UsefulBits, Depth + 1);
1732 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1734 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1735 getUsefulBits(Op, UsefulBits, Depth + 1);
1739 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1740 SDValue Orig, unsigned Depth) {
1742 // Users of this node should have already been instruction selected
1743 // FIXME: Can we turn that into an assert?
1744 if (!UserNode->isMachineOpcode())
1747 switch (UserNode->getMachineOpcode()) {
1750 case AArch64::ANDSWri:
1751 case AArch64::ANDSXri:
1752 case AArch64::ANDWri:
1753 case AArch64::ANDXri:
1754 // We increment Depth only when we call getUsefulBits.
1755 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1757 case AArch64::UBFMWri:
1758 case AArch64::UBFMXri:
1759 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1761 case AArch64::ORRWrs:
1762 case AArch64::ORRXrs:
1763 if (UserNode->getOperand(1) != Orig)
1765 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1767 case AArch64::BFMWri:
1768 case AArch64::BFMXri:
1769 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1773 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1776 // Initialize UsefulBits
1778 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1779 // At the beginning, assume every produced bit is useful
1780 UsefulBits = APInt(Bitwidth, 0);
1781 UsefulBits.flipAllBits();
1783 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1785 for (SDNode *Node : Op.getNode()->uses()) {
1786 // A use cannot produce useful bits
1787 APInt UsefulBitsForUse = APInt(UsefulBits);
1788 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1789 UsersUsefulBits |= UsefulBitsForUse;
1791 // UsefulBits contains the produced bits that are meaningful for the
1792 // current definition, thus a user cannot make a bit meaningful at
1794 UsefulBits &= UsersUsefulBits;
1797 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
1798 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1799 /// 0, return Op unchanged.
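/// For example (an illustrative note, not from the original comments): on a
/// 32-bit value, ShlAmount == 3 becomes "UBFM wD, wN, #29, #28" (LSL #3),
/// while ShlAmount == -3 becomes "UBFM wD, wN, #3, #31" (LSR #3).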
1800 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1804 EVT VT = Op.getValueType();
1805 unsigned BitWidth = VT.getSizeInBits();
1806 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1809 if (ShlAmount > 0) {
1810 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
1811 ShiftNode = CurDAG->getMachineNode(
1812 UBFMOpc, SDLoc(Op), VT, Op,
1813 CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
1814 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
1816 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1817 assert(ShlAmount < 0 && "expected right shift");
1818 int ShrAmount = -ShlAmount;
1819 ShiftNode = CurDAG->getMachineNode(
1820 UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
1821 CurDAG->getTargetConstant(BitWidth - 1, VT));
1824 return SDValue(ShiftNode, 0);
1827 /// Does this tree qualify as an attempt to move a bitfield into position,
1828 /// essentially "(and (shl VAL, N), Mask)".
1829 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1830 SDValue &Src, int &ShiftAmount,
1832 EVT VT = Op.getValueType();
1833 unsigned BitWidth = VT.getSizeInBits();
1835 assert(BitWidth == 32 || BitWidth == 64);
1837 APInt KnownZero, KnownOne;
1838 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1840 // Non-zero in the sense that they're not provably zero, which is the key
1841 // point if we want to use this value
1842 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1844 // Discard a constant AND mask if present. It's safe because the node will
1845 // already have been factored into the computeKnownBits calculation above.
1847 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1848 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1849 Op = Op.getOperand(0);
1853 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1855 Op = Op.getOperand(0);
1857 if (!isShiftedMask_64(NonZeroBits))
1860 ShiftAmount = countTrailingZeros(NonZeroBits);
1861 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
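// Illustrative sketch (added note, not from the original source): with
// NonZeroBits == 0x0ff0 we get ShiftAmount == 4 and MaskWidth == 8, i.e. an
// 8-bit field positioned at bit 4.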
1863 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1864 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1866 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
1871 // Given an OR operation, check if we have the following pattern:
1872 // ubfm c, b, imm, imm2 (or something that does the same job, see
1873 // isBitfieldExtractOp)
1874 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1875 // countTrailingZeros(mask2) == imm2 - imm + 1
1877 // If so, the given reference arguments will be updated so that one can replace
1878 // the OR instruction with:
1879 // f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
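// For instance (an illustrative sketch, not from the original comments): with
// imm = 4, imm2 = 11 and mask2 = 0xffffff00, the OR merges the 8-bit field
// b[11:4] into the low bits of e, which the single instruction
// "BFXIL We, Wb, #4, #8" (the alias of BFM with LSB = 4, MSB = 11) produces.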
1880 static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1881 SDValue &Src, unsigned &ImmR,
1882 unsigned &ImmS, SelectionDAG *CurDAG) {
1883 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
1886 EVT VT = N->getValueType(0);
1888 Opc = AArch64::BFMWri;
1889 else if (VT == MVT::i64)
1890 Opc = AArch64::BFMXri;
1894 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1895 // have the expected shape. Try to undo that.
1897 getUsefulBits(SDValue(N, 0), UsefulBits);
1899 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1900 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1902 // OR is commutative, check both possibilities (does llvm provide a
1903 // way to do that directly, e.g., via code matcher?)
1904 SDValue OrOpd1Val = N->getOperand(1);
1905 SDNode *OrOpd0 = N->getOperand(0).getNode();
1906 SDNode *OrOpd1 = N->getOperand(1).getNode();
1907 for (int i = 0; i < 2;
1908 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
1911 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
1912 NumberOfIgnoredLowBits, true)) {
1913 // Check that the returned opcode is compatible with the pattern,
1914 // i.e., same type and zero extended (U and not S)
1915 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
1916 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
1919 // Compute the width of the bitfield insertion
1921 Width = ImmS - ImmR + 1;
1922 // FIXME: This constraint is to catch bitfield insertions; we may
1923 // want to widen the pattern if we want to grab general bitfield
1928 // If the mask on the insertee is correct, we have a BFXIL operation. We
1929 // can share the ImmR and ImmS values from the already-computed UBFM.
1930 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
1932 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
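// Added note (based on the BFI alias encoding; not from the original
// comments): BFI Wd, Wn, #lsb, #width is BFM Wd, Wn,
// #((BitWidth - lsb) % BitWidth), #(width - 1), so ImmR here is the rotation
// needed to place the field at DstLSB.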
1937 // Check the second part of the pattern
1938 EVT VT = OrOpd1->getValueType(0);
1939 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
1941 // Compute the Known Zero for the candidate of the first operand.
1942 // This allows catching more general cases than just looking for an
1943 // AND with an immediate. Indeed, simplify-demanded-bits may have removed
1944 // the AND instruction because it proved it was useless.
1945 APInt KnownZero, KnownOne;
1946 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
1948 // Check if there is enough room for the second operand to appear
1950 APInt BitsToBeInserted =
1951 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
1953 if ((BitsToBeInserted & ~KnownZero) != 0)
1956 // Set the first operand
1958 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
1959 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
1960 // In that case, we can eliminate the AND
1961 Dst = OrOpd1->getOperand(0);
1963 // Maybe the AND has been removed by simplify-demanded-bits
1964 // or is useful because it discards more bits
1974 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
1975 if (N->getOpcode() != ISD::OR)
1982 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
1985 EVT VT = N->getValueType(0);
1986 SDValue Ops[] = { Opd0,
1988 CurDAG->getTargetConstant(LSB, VT),
1989 CurDAG->getTargetConstant(MSB, VT) };
1990 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1993 SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
1994 EVT VT = N->getValueType(0);
1997 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
1999 if (VT == MVT::f32) {
2001 } else if (VT == MVT::f64) {
2004 return nullptr; // Unrecognized argument type. Fall back on default codegen.
2006 // Pick the FRINTX variant needed to set the flags.
2007 unsigned FRINTXOpc = FRINTXOpcs[Variant];
2009 switch (N->getOpcode()) {
2011 return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
2013 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2014 Opc = FRINTPOpcs[Variant];
2018 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2019 Opc = FRINTMOpcs[Variant];
2023 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2024 Opc = FRINTZOpcs[Variant];
2028 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2029 Opc = FRINTAOpcs[Variant];
2035 SDValue In = N->getOperand(0);
2036 SmallVector<SDValue, 2> Ops;
2039 if (!TM.Options.UnsafeFPMath) {
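// Added explanatory note (not from the original comments): FRINTX is the
// "round to integral, exact" form that signals the Inexact FP exception.
// Its glue result (result 1) is threaded into the final FRINT* node below,
// presumably so the flag-setting round is preserved and kept adjacent when
// exact FP semantics are required.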
2040 SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
2041 Ops.push_back(SDValue(FRINTX, 1));
2044 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2048 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2049 unsigned RegWidth) {
2051 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2052 FVal = CN->getValueAPF();
2053 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2054 // Some otherwise illegal constants are allowed in this case.
2055 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2056 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2059 ConstantPoolSDNode *CN =
2060 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2061 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2065 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2066 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2069 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2070 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2074 // fbits is between 1 and 64 in the worst-case, which means the fmul
2075 // could have 2^64 as an actual operand. Need 65 bits of precision.
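// Worked example (added note, not from the original comments): for
// "fptosi(fmul x, 16.0)" the constant is 2^4, so FBits ends up as 4 and the
// conversion can use a fixed-point FCVTZ[SU] with #4 fractional bits.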
2076 APSInt IntVal(65, true);
2077 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2079 // N.b. isPowerOf2 also checks for > 0.
2080 if (!IsExact || !IntVal.isPowerOf2()) return false;
2081 unsigned FBits = IntVal.logBase2();
2083 // Checks above should have guaranteed that we haven't lost information in
2084 // finding FBits, but it must still be in range.
2085 if (FBits == 0 || FBits > RegWidth) return false;
2087 FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
2091 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2092 // Dump information about the Node being selected
2093 DEBUG(errs() << "Selecting: ");
2094 DEBUG(Node->dump(CurDAG));
2095 DEBUG(errs() << "\n");
2097 // If we have a custom node, it has already been selected.
2098 if (Node->isMachineOpcode()) {
2099 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2100 Node->setNodeId(-1);
2104 // A few cases need custom selection.
2105 SDNode *ResNode = nullptr;
2106 EVT VT = Node->getValueType(0);
2108 switch (Node->getOpcode()) {
2113 if (SDNode *I = SelectMLAV64LaneV128(Node))
2118 // Try to select as an indexed load. Fall through to normal processing
2121 SDNode *I = SelectIndexedLoad(Node, Done);
2130 if (SDNode *I = SelectBitfieldExtractOp(Node))
2135 if (SDNode *I = SelectBitfieldInsertOp(Node))
2139 case ISD::EXTRACT_VECTOR_ELT: {
2140 // Extracting lane zero is a special case where we can just use a plain
2141 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2142 // the rest of the compiler, especially the register allocator and copy
2143 // propagation, to reason about, so is preferred when it's possible to
2145 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2146 // Bail and use the default Select() for non-zero lanes.
2147 if (LaneNode->getZExtValue() != 0)
2149 // If the element type is not the same as the result type, likewise
2150 // bail and use the default Select(), as there's more to do than just
2151 // a cross-class COPY. This catches extracts of i8 and i16 elements
2152 // since they will need an explicit zext.
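// For example (an illustrative note, not from the original comments):
// extracting lane 0 of a v2f64 as an f64 is selected below as an
// EXTRACT_SUBREG of dsub (the 64-bit element case in the switch that follows).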
2153 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2156 switch (Node->getOperand(0)
2158 .getVectorElementType()
2161 llvm_unreachable("Unexpected vector element type!");
2163 SubReg = AArch64::dsub;
2166 SubReg = AArch64::ssub;
2169 SubReg = AArch64::hsub;
2172 llvm_unreachable("unexpected zext-requiring extract element!");
2174 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2175 Node->getOperand(0));
2176 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2177 DEBUG(Extract->dumpr(CurDAG));
2178 DEBUG(dbgs() << "\n");
2179 return Extract.getNode();
2181 case ISD::Constant: {
2182 // Materialize zero constants as copies from WZR/XZR. This allows
2183 // the coalescer to propagate these into other instructions.
2184 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2185 if (ConstNode->isNullValue()) {
2187 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2188 AArch64::WZR, MVT::i32).getNode();
2189 else if (VT == MVT::i64)
2190 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2191 AArch64::XZR, MVT::i64).getNode();
2196 case ISD::FrameIndex: {
2197 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2198 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2199 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2200 const TargetLowering *TLI = getTargetLowering();
2201 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
2202 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2203 CurDAG->getTargetConstant(Shifter, MVT::i32) };
2204 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
2206 case ISD::INTRINSIC_W_CHAIN: {
2207 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2211 case Intrinsic::aarch64_ldaxp:
2212 case Intrinsic::aarch64_ldxp: {
2214 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2215 SDValue MemAddr = Node->getOperand(2);
2217 SDValue Chain = Node->getOperand(0);
2219 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2220 MVT::Other, MemAddr, Chain);
2222 // Transfer memoperands.
2223 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2224 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2225 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2228 case Intrinsic::aarch64_stlxp:
2229 case Intrinsic::aarch64_stxp: {
2231 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2233 SDValue Chain = Node->getOperand(0);
2234 SDValue ValLo = Node->getOperand(2);
2235 SDValue ValHi = Node->getOperand(3);
2236 SDValue MemAddr = Node->getOperand(4);
2238 // Place arguments in the right order.
2239 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
2241 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2242 // Transfer memoperands.
2243 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2244 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2245 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2249 case Intrinsic::aarch64_neon_ld1x2:
2250 if (VT == MVT::v8i8)
2251 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2252 else if (VT == MVT::v16i8)
2253 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
2254 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2255 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
2256 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2257 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2258 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2259 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2260 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2261 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2262 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2263 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2264 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2265 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2267 case Intrinsic::aarch64_neon_ld1x3:
2268 if (VT == MVT::v8i8)
2269 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2270 else if (VT == MVT::v16i8)
2271 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
2272 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2273 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
2274 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2275 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2276 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2277 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2278 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2279 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2280 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2281 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2282 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2283 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2285 case Intrinsic::aarch64_neon_ld1x4:
2286 if (VT == MVT::v8i8)
2287 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2288 else if (VT == MVT::v16i8)
2289 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
2290 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2291 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
2292 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2293 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2294 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2295 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2296 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2297 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2298 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2299 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2300 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2301 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2303 case Intrinsic::aarch64_neon_ld2:
2304 if (VT == MVT::v8i8)
2305 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2306 else if (VT == MVT::v16i8)
2307 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
2308 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2309 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
2310 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2311 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2312 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2313 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2314 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2315 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2316 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2317 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2318 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2319 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2321 case Intrinsic::aarch64_neon_ld3:
2322 if (VT == MVT::v8i8)
2323 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2324 else if (VT == MVT::v16i8)
2325 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
2326 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2327 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
2328 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2329 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2330 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2331 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2332 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2333 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2334 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2335 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2336 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2337 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2339 case Intrinsic::aarch64_neon_ld4:
2340 if (VT == MVT::v8i8)
2341 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2342 else if (VT == MVT::v16i8)
2343 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
2344 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2345 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
2346 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2347 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2348 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2349 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2350 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2351 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2352 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2353 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2354 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2355 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2357 case Intrinsic::aarch64_neon_ld2r:
2358 if (VT == MVT::v8i8)
2359 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2360 else if (VT == MVT::v16i8)
2361 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
2362 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2363 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
2364 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2365 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2366 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2367 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2368 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2369 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2370 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2371 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2372 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2373 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2375 case Intrinsic::aarch64_neon_ld3r:
2376 if (VT == MVT::v8i8)
2377 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2378 else if (VT == MVT::v16i8)
2379 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
2380 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2381 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
2382 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2383 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2384 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2385 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2386 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2387 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2388 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2389 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2390 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2391 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2393 case Intrinsic::aarch64_neon_ld4r:
2394 if (VT == MVT::v8i8)
2395 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2396 else if (VT == MVT::v16i8)
2397 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
2398 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2399 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
2400 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2401 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2402 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2403 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2404 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2405 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2406 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2407 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2408 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2409 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2411 case Intrinsic::aarch64_neon_ld2lane:
2412 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2413 return SelectLoadLane(Node, 2, AArch64::LD2i8);
2414 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2416 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2417 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2419 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2420 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2422 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2424 case Intrinsic::aarch64_neon_ld3lane:
2425 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2426 return SelectLoadLane(Node, 3, AArch64::LD3i8);
2427 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2429 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2430 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2432 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2433 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2435 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2437 case Intrinsic::aarch64_neon_ld4lane:
2438 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2439 return SelectLoadLane(Node, 4, AArch64::LD4i8);
2440 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2442 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2443 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2445 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2446 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2448 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2452 case ISD::INTRINSIC_WO_CHAIN: {
2453 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
2457 case Intrinsic::aarch64_neon_tbl2:
2458 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
2459 : AArch64::TBLv16i8Two,
2461 case Intrinsic::aarch64_neon_tbl3:
2462 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
2463 : AArch64::TBLv16i8Three,
2465 case Intrinsic::aarch64_neon_tbl4:
2466 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
2467 : AArch64::TBLv16i8Four,
2469 case Intrinsic::aarch64_neon_tbx2:
2470 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
2471 : AArch64::TBXv16i8Two,
2473 case Intrinsic::aarch64_neon_tbx3:
2474 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
2475 : AArch64::TBXv16i8Three,
2477 case Intrinsic::aarch64_neon_tbx4:
2478 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
2479 : AArch64::TBXv16i8Four,
2481 case Intrinsic::aarch64_neon_smull:
2482 case Intrinsic::aarch64_neon_umull:
2483 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2489 case ISD::INTRINSIC_VOID: {
2490 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2491 if (Node->getNumOperands() >= 3)
2492 VT = Node->getOperand(2)->getValueType(0);
2496 case Intrinsic::aarch64_neon_st1x2: {
2497 if (VT == MVT::v8i8)
2498 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2499 else if (VT == MVT::v16i8)
2500 return SelectStore(Node, 2, AArch64::ST1Twov16b);
2501 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2502 return SelectStore(Node, 2, AArch64::ST1Twov4h);
2503 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2504 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2505 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2506 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2507 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2508 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2509 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2510 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2511 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2512 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2515 case Intrinsic::aarch64_neon_st1x3: {
2516 if (VT == MVT::v8i8)
2517 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2518 else if (VT == MVT::v16i8)
2519 return SelectStore(Node, 3, AArch64::ST1Threev16b);
2520 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2521 return SelectStore(Node, 3, AArch64::ST1Threev4h);
2522 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2523 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2524 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2525 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2526 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2527 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2528 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2529 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2530 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2531 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2534 case Intrinsic::aarch64_neon_st1x4: {
2535 if (VT == MVT::v8i8)
2536 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2537 else if (VT == MVT::v16i8)
2538 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
2539 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2540 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
2541 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2542 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2543 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2544 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2545 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2546 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2547 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2548 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2549 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2550 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2553 case Intrinsic::aarch64_neon_st2: {
2554 if (VT == MVT::v8i8)
2555 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2556 else if (VT == MVT::v16i8)
2557 return SelectStore(Node, 2, AArch64::ST2Twov16b);
2558 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2559 return SelectStore(Node, 2, AArch64::ST2Twov4h);
2560 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2561 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2562 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2563 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2564 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2565 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2566 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2567 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2568 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2569 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2572 case Intrinsic::aarch64_neon_st3: {
2573 if (VT == MVT::v8i8)
2574 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2575 else if (VT == MVT::v16i8)
2576 return SelectStore(Node, 3, AArch64::ST3Threev16b);
2577 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2578 return SelectStore(Node, 3, AArch64::ST3Threev4h);
2579 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2580 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2581 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2582 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2583 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2584 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2585 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2586 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2587 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2588 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2591 case Intrinsic::aarch64_neon_st4: {
2592 if (VT == MVT::v8i8)
2593 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2594 else if (VT == MVT::v16i8)
2595 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
2596 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2597 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
2598 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2599 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2600 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2601 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2602 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2603 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2604 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2605 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2606 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2607 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2610 case Intrinsic::aarch64_neon_st2lane: {
2611 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2612 return SelectStoreLane(Node, 2, AArch64::ST2i8);
2613 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2615 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2616 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2618 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2619 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2621 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2624 case Intrinsic::aarch64_neon_st3lane: {
2625 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2626 return SelectStoreLane(Node, 3, AArch64::ST3i8);
2627 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2629 return SelectStoreLane(Node, 3, AArch64::ST3i16);
2630 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2632 return SelectStoreLane(Node, 3, AArch64::ST3i32);
2633 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2635 return SelectStoreLane(Node, 3, AArch64::ST3i64);
2638 case Intrinsic::aarch64_neon_st4lane: {
2639 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2640 return SelectStoreLane(Node, 4, AArch64::ST4i8);
2641 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2643 return SelectStoreLane(Node, 4, AArch64::ST4i16);
2644 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2646 return SelectStoreLane(Node, 4, AArch64::ST4i32);
2647 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2649 return SelectStoreLane(Node, 4, AArch64::ST4i64);
2654 case AArch64ISD::LD2post: {
2655 if (VT == MVT::v8i8)
2656 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
2657 else if (VT == MVT::v16i8)
2658 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
2659 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2660 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
2661 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2662 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
2663 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2664 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
2665 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2666 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
2667 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2668 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2669 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2670 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
2673 case AArch64ISD::LD3post: {
2674 if (VT == MVT::v8i8)
2675 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
2676 else if (VT == MVT::v16i8)
2677 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
2678 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2679 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
2680 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2681 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
2682 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2683 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
2684 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2685 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
2686 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2687 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2688 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2689 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
2692 case AArch64ISD::LD4post: {
2693 if (VT == MVT::v8i8)
2694 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
2695 else if (VT == MVT::v16i8)
2696 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
2697 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2698 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
2699 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2700 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
2701 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2702 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
2703 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2704 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
2705 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2706 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2707 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2708 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
2711 case AArch64ISD::LD1x2post: {
2712 if (VT == MVT::v8i8)
2713 return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
2714 else if (VT == MVT::v16i8)
2715 return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
2716 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2717 return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
2718 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2719 return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
2720 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2721 return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
2722 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2723 return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
2724 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2725 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2726 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2727 return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
2730 case AArch64ISD::LD1x3post: {
2731 if (VT == MVT::v8i8)
2732 return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
2733 else if (VT == MVT::v16i8)
2734 return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
2735 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2736 return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
2737 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2738 return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
2739 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2740 return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
2741 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2742 return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
2743 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2744 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2745 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2746 return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
2749 case AArch64ISD::LD1x4post: {
2750 if (VT == MVT::v8i8)
2751 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
2752 else if (VT == MVT::v16i8)
2753 return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
2754 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2755 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
2756 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2757 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
2758 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2759 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
2760 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2761 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
2762 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2763 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2764 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2765 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
2768 case AArch64ISD::LD1DUPpost: {
2769 if (VT == MVT::v8i8)
2770 return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
2771 else if (VT == MVT::v16i8)
2772 return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
2773 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2774 return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
2775 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2776 return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
2777 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2778 return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
2779 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2780 return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
2781 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2782 return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
2783 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2784 return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
2787 case AArch64ISD::LD2DUPpost: {
2788 if (VT == MVT::v8i8)
2789 return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
2790 else if (VT == MVT::v16i8)
2791 return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
2792 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2793 return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
2794 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2795 return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
2796 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2797 return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
2798 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2799 return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
2800 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2801 return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
2802 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2803 return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
2806 case AArch64ISD::LD3DUPpost: {
2807 if (VT == MVT::v8i8)
2808 return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
2809 else if (VT == MVT::v16i8)
2810 return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
2811 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2812 return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
2813 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2814 return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
2815 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2816 return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
2817 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2818 return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
2819 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2820 return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
2821 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2822 return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
2825 case AArch64ISD::LD4DUPpost: {
2826 if (VT == MVT::v8i8)
2827 return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
2828 else if (VT == MVT::v16i8)
2829 return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
2830 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2831 return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
2832 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2833 return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
2834 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2835 return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
2836 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2837 return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
2838 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2839 return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
2840 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2841 return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
2844 case AArch64ISD::LD1LANEpost: {
2845 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2846 return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
2847 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2849 return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
2850 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2852 return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
2853 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2855 return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
2858 case AArch64ISD::LD2LANEpost: {
2859 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2860 return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
2861 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2863 return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
2864 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2866 return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
2867 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2869 return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
2872 case AArch64ISD::LD3LANEpost: {
2873 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2874 return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
2875 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2877 return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
2878 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2880 return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
2881 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2883 return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
2886 case AArch64ISD::LD4LANEpost: {
2887 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2888 return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
2889 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2891 return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
2892 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2894 return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
2895 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2897 return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
2900 case AArch64ISD::ST2post: {
2901 VT = Node->getOperand(1).getValueType();
2902 if (VT == MVT::v8i8)
2903 return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
2904 else if (VT == MVT::v16i8)
2905 return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
2906 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2907 return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
2908 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2909 return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
2910 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2911 return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
2912 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2913 return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
2914 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2915 return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
2916 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2917 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
2920 case AArch64ISD::ST3post: {
2921 VT = Node->getOperand(1).getValueType();
2922 if (VT == MVT::v8i8)
2923 return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
2924 else if (VT == MVT::v16i8)
2925 return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
2926 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2927 return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
2928 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2929 return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
2930 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2931 return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
2932 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2933 return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
2934 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2935 return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
2936 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2937 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
2940 case AArch64ISD::ST4post: {
2941 VT = Node->getOperand(1).getValueType();
2942 if (VT == MVT::v8i8)
2943 return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
2944 else if (VT == MVT::v16i8)
2945 return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
2946 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2947 return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
2948 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2949 return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
2950 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2951 return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
2952 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2953 return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
2954 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2955 return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
2956 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2957 return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
2960 case AArch64ISD::ST1x2post: {
2961 VT = Node->getOperand(1).getValueType();
2962 if (VT == MVT::v8i8)
2963 return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
2964 else if (VT == MVT::v16i8)
2965 return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
2966 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2967 return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
2968 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2969 return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
2970 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2971 return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
2972 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2973 return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
2974 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2975 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
2976 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2977 return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
2980 case AArch64ISD::ST1x3post: {
2981 VT = Node->getOperand(1).getValueType();
2982 if (VT == MVT::v8i8)
2983 return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
2984 else if (VT == MVT::v16i8)
2985 return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
2986 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2987 return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
2988 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2989 return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
2990 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2991 return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
2992 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2993 return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
2994 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2995 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
2996 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2997 return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
3000 case AArch64ISD::ST1x4post: {
3001 VT = Node->getOperand(1).getValueType();
3002 if (VT == MVT::v8i8)
3003 return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
3004 else if (VT == MVT::v16i8)
3005 return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
3006 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3007 return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
3008 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3009 return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
3010 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3011 return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
3012 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3013 return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
3014 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3015 return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
3016 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3017 return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
3020 case AArch64ISD::ST2LANEpost: {
3021 VT = Node->getOperand(1).getValueType();
3022 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3023 return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
3024 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3026 return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
3027 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3029 return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
3030 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3032 return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
3035 case AArch64ISD::ST3LANEpost: {
3036 VT = Node->getOperand(1).getValueType();
3037 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3038 return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
3039 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3041 return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
3042 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3044 return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
3045 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3047 return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
3050 case AArch64ISD::ST4LANEpost: {
3051 VT = Node->getOperand(1).getValueType();
3052 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3053 return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
3054 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3056 return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
3057 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3059 return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
3060 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3062 return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
3070 if (SDNode *I = SelectLIBM(Node))
3075 // Select the default instruction
3076 ResNode = SelectCode(Node);
3078 DEBUG(errs() << "=> ");
3079 if (ResNode == nullptr || ResNode == Node)
3080 DEBUG(Node->dump(CurDAG));
3082 DEBUG(ResNode->dump(CurDAG));
3083 DEBUG(errs() << "\n");
3088 /// createAArch64ISelDag - This pass converts a legalized DAG into an
3089 /// AArch64-specific DAG, ready for instruction scheduling.
3090 FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
3091 CodeGenOpt::Level OptLevel) {
3092 return new AArch64DAGToDAGISel(TM, OptLevel);