X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTarget%2FAArch64%2FAArch64ISelDAGToDAG.cpp;h=d93e59ccf77ce984031f4e0f2bab497e1b0e963f;hp=fb4e19c45fed0ecd71f1edc014b04a84cd2b288a;hb=221b8cc736bca9f714afd86a6fc4bc6e51aab00b;hpb=591c2f738a3e12026ff5504a486d54fc21fb3049 diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index fb4e19c45fe..d93e59ccf77 100644 --- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -11,1026 +11,3344 @@ // //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "aarch64-isel" -#include "AArch64.h" -#include "AArch64InstrInfo.h" -#include "AArch64Subtarget.h" #include "AArch64TargetMachine.h" -#include "Utils/AArch64BaseInfo.h" +#include "MCTargetDesc/AArch64AddressingModes.h" #include "llvm/ADT/APSInt.h" #include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/IR/Function.h" // To access function attributes. #include "llvm/IR/GlobalValue.h" +#include "llvm/IR/Intrinsics.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; +#define DEBUG_TYPE "aarch64-isel" + //===--------------------------------------------------------------------===// -/// AArch64 specific code to select AArch64 machine instructions for -/// SelectionDAG operations. +/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine +/// instructions for SelectionDAG operations. /// namespace { class AArch64DAGToDAGISel : public SelectionDAGISel { AArch64TargetMachine &TM; - /// Keep a pointer to the AArch64Subtarget around so that we can + /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can /// make the right decision when generating code for different targets. const AArch64Subtarget *Subtarget; + bool ForCodeSize; + public: explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm, CodeGenOpt::Level OptLevel) - : SelectionDAGISel(tm, OptLevel), TM(tm), - Subtarget(&TM.getSubtarget()) { - } + : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr), + ForCodeSize(false) {} - virtual const char *getPassName() const { + const char *getPassName() const override { return "AArch64 Instruction Selection"; } - // Include the pieces autogenerated from the target description. -#include "AArch64GenDAGISel.inc" + bool runOnMachineFunction(MachineFunction &MF) override { + ForCodeSize = MF.getFunction()->optForSize(); + Subtarget = &MF.getSubtarget(); + return SelectionDAGISel::runOnMachineFunction(MF); + } - template - bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) { - const ConstantSDNode *CN = dyn_cast(N); - if (!CN || CN->getZExtValue() % MemSize != 0 - || CN->getZExtValue() / MemSize > 0xfff) - return false; + SDNode *Select(SDNode *Node) override; - UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64); - return true; + /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for + /// inline asm expressions. 
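+  /// For example, the "m" and "Q" constraints are both handled below by simply
+  /// requiring the complete address to already be in a single register.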
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, + unsigned ConstraintID, + std::vector &OutOps) override; + + SDNode *SelectMLAV64LaneV128(SDNode *N); + SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N); + bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift); + bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift); + bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift); + bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) { + return SelectShiftedRegister(N, false, Reg, Shift); + } + bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) { + return SelectShiftedRegister(N, true, Reg, Shift); + } + bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeIndexed(N, 1, Base, OffImm); + } + bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeIndexed(N, 2, Base, OffImm); + } + bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeIndexed(N, 4, Base, OffImm); + } + bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeIndexed(N, 8, Base, OffImm); + } + bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeIndexed(N, 16, Base, OffImm); + } + bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeUnscaled(N, 1, Base, OffImm); + } + bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeUnscaled(N, 2, Base, OffImm); + } + bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeUnscaled(N, 4, Base, OffImm); + } + bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeUnscaled(N, 8, Base, OffImm); + } + bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) { + return SelectAddrModeUnscaled(N, 16, Base, OffImm); } - template - bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) { - return SelectCVTFixedPosOperand(N, FixedPos, RegWidth); + template + bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset, + SDValue &SignExtend, SDValue &DoShift) { + return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift); } - /// Used for pre-lowered address-reference nodes, so we already know - /// the fields match. This operand's job is simply to add an - /// appropriate shift operand to the MOVZ/MOVK instruction. - template - bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) { - Imm = N; - Shift = CurDAG->getTargetConstant(LogShift, MVT::i32); - return true; + template + bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset, + SDValue &SignExtend, SDValue &DoShift) { + return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift); } - bool SelectFPZeroOperand(SDValue N, SDValue &Dummy); - bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, - unsigned RegWidth); + /// Form sequences of consecutive 64/128-bit registers for use in NEON + /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have + /// between 1 and 4 elements. If it contains a single element that is returned + /// unchanged; otherwise a REG_SEQUENCE value is returned. 
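+  /// e.g. createDTuple({A, B, C}) wraps A, B and C into dsub0..dsub2 of a single
+  /// DDD-class REG_SEQUENCE, suitable as the vector-list operand of an LD3/ST3.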
+ SDValue createDTuple(ArrayRef Vecs); + SDValue createQTuple(ArrayRef Vecs); - bool SelectInlineAsmMemoryOperand(const SDValue &Op, - char ConstraintCode, - std::vector &OutOps); + /// Generic helper for the createDTuple/createQTuple + /// functions. Those should almost always be called instead. + SDValue createTuple(ArrayRef Vecs, const unsigned RegClassIDs[], + const unsigned SubRegs[]); - bool SelectLogicalImm(SDValue N, SDValue &Imm); + SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt); - template - bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) { - return SelectTSTBOperand(N, FixedPos, RegWidth); - } + SDNode *SelectIndexedLoad(SDNode *N, bool &Done); - bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth); + SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc, + unsigned SubRegIdx); + SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc, + unsigned SubRegIdx); + SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc); + SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc); - SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32, - unsigned Op64); + SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc); + SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc); + SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc); + SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc); - /// Put the given constant into a pool and return a DAG which will give its - /// address. - SDValue getConstantPoolItemAddress(SDLoc DL, const Constant *CV); + SDNode *SelectBitfieldExtractOp(SDNode *N); + SDNode *SelectBitfieldInsertOp(SDNode *N); - SDNode *TrySelectToMoveImm(SDNode *N); - SDNode *LowerToFPLitPool(SDNode *Node); - SDNode *SelectToLitPool(SDNode *N); + SDNode *SelectLIBM(SDNode *N); + SDNode *SelectFPConvertWithRound(SDNode *N); - SDNode* Select(SDNode*); -private: - /// Select NEON load intrinsics. NumVecs should be 1, 2, 3 or 4. - SDNode *SelectVLD(SDNode *N, unsigned NumVecs, bool isUpdating, - const uint16_t *Opcode); + SDNode *SelectReadRegister(SDNode *N); + SDNode *SelectWriteRegister(SDNode *N); + +// Include the pieces autogenerated from the target description. +#include "AArch64GenDAGISel.inc" - /// Select NEON store intrinsics. NumVecs should be 1, 2, 3 or 4. - SDNode *SelectVST(SDNode *N, unsigned NumVecs, bool isUpdating, - const uint16_t *Opcodes); +private: + bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg, + SDValue &Shift); + bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base, + SDValue &OffImm); + bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base, + SDValue &OffImm); + bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base, + SDValue &Offset, SDValue &SignExtend, + SDValue &DoShift); + bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base, + SDValue &Offset, SDValue &SignExtend, + SDValue &DoShift); + bool isWorthFolding(SDValue V) const; + bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend, + SDValue &Offset, SDValue &SignExtend); - // Form pairs of consecutive 64-bit/128-bit registers. - SDNode *createDPairNode(SDValue V0, SDValue V1); - SDNode *createQPairNode(SDValue V0, SDValue V1); + template + bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) { + return SelectCVTFixedPosOperand(N, FixedPos, RegWidth); + } - // Form sequences of 3 consecutive 64-bit/128-bit registers. 
- SDNode *createDTripleNode(SDValue V0, SDValue V1, SDValue V2); - SDNode *createQTripleNode(SDValue V0, SDValue V1, SDValue V2); + bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width); - // Form sequences of 4 consecutive 64-bit/128-bit registers. - SDNode *createDQuadNode(SDValue V0, SDValue V1, SDValue V2, SDValue V3); - SDNode *createQQuadNode(SDValue V0, SDValue V1, SDValue V2, SDValue V3); + SDNode *GenerateInexactFlagIfNeeded(const SDValue &In, unsigned InTyVariant, + SDLoc DL); }; +} // end anonymous namespace + +/// isIntImmediate - This method tests to see if the node is a constant +/// operand. If so Imm will receive the 32-bit value. +static bool isIntImmediate(const SDNode *N, uint64_t &Imm) { + if (const ConstantSDNode *C = dyn_cast(N)) { + Imm = C->getZExtValue(); + return true; + } + return false; } -bool -AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, - unsigned RegWidth) { - const ConstantFPSDNode *CN = dyn_cast(N); - if (!CN) return false; +// isIntImmediate - This method tests to see if a constant operand. +// If so Imm will receive the value. +static bool isIntImmediate(SDValue N, uint64_t &Imm) { + return isIntImmediate(N.getNode(), Imm); +} - // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits - // is between 1 and 32 for a destination w-register, or 1 and 64 for an - // x-register. - // - // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we - // want THIS_NODE to be 2^fbits. This is much easier to deal with using - // integers. - bool IsExact; +// isOpcWithIntImmediate - This method tests to see if the node is a specific +// opcode and that it has a immediate integer right operand. +// If so Imm will receive the 32 bit value. +static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc, + uint64_t &Imm) { + return N->getOpcode() == Opc && + isIntImmediate(N->getOperand(1).getNode(), Imm); +} - // fbits is between 1 and 64 in the worst-case, which means the fmul - // could have 2^64 as an actual operand. Need 65 bits of precision. - APSInt IntVal(65, true); - CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact); +bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand( + const SDValue &Op, unsigned ConstraintID, std::vector &OutOps) { + switch(ConstraintID) { + default: + llvm_unreachable("Unexpected asm memory constraint"); + case InlineAsm::Constraint_i: + case InlineAsm::Constraint_m: + case InlineAsm::Constraint_Q: + // Require the address to be in a register. That is safe for all AArch64 + // variants and it is hard to do anything much smarter without knowing + // how the operand is used. + OutOps.push_back(Op); + return false; + } + return true; +} - // N.b. isPowerOf2 also checks for > 0. - if (!IsExact || !IntVal.isPowerOf2()) return false; - unsigned FBits = IntVal.logBase2(); +/// SelectArithImmed - Select an immediate value that can be represented as +/// a 12-bit value shifted left by either 0 or 12. If so, return true with +/// Val set to the 12-bit value and Shift set to the shifter operand. +bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val, + SDValue &Shift) { + // This function is called from the addsub_shifted_imm ComplexPattern, + // which lists [imm] as the list of opcode it's interested in, however + // we still need to check whether the operand is actually an immediate + // here because the ComplexPattern opcode list is only used in + // root-level opcode matching. 
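+  // Illustration: 0x123 is selected as (0x123, LSL #0) and 0x123000 as
+  // (0x123, LSL #12), while a value such as 0x123456 is rejected.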
+ if (!isa(N.getNode())) + return false; - // Checks above should have guaranteed that we haven't lost information in - // finding FBits, but it must still be in range. - if (FBits == 0 || FBits > RegWidth) return false; + uint64_t Immed = cast(N.getNode())->getZExtValue(); + unsigned ShiftAmt; + + if (Immed >> 12 == 0) { + ShiftAmt = 0; + } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) { + ShiftAmt = 12; + Immed = Immed >> 12; + } else + return false; - FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32); + unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt); + SDLoc dl(N); + Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32); + Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32); return true; } -bool -AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op, - char ConstraintCode, - std::vector &OutOps) { - switch (ConstraintCode) { - default: llvm_unreachable("Unrecognised AArch64 memory constraint"); - case 'm': - // FIXME: more freedom is actually permitted for 'm'. We can go - // hunting for a base and an offset if we want. Of course, since - // we don't really know how the operand is going to be used we're - // probably restricted to the load/store pair's simm7 as an offset - // range anyway. - case 'Q': - OutOps.push_back(Op); +/// SelectNegArithImmed - As above, but negates the value before trying to +/// select it. +bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val, + SDValue &Shift) { + // This function is called from the addsub_shifted_imm ComplexPattern, + // which lists [imm] as the list of opcode it's interested in, however + // we still need to check whether the operand is actually an immediate + // here because the ComplexPattern opcode list is only used in + // root-level opcode matching. + if (!isa(N.getNode())) + return false; + + // The immediate operand must be a 24-bit zero-extended immediate. + uint64_t Immed = cast(N.getNode())->getZExtValue(); + + // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0" + // have the opposite effect on the C flag, so this pattern mustn't match under + // those circumstances. + if (Immed == 0) + return false; + + if (N.getValueType() == MVT::i32) + Immed = ~((uint32_t)Immed) + 1; + else + Immed = ~Immed + 1ULL; + if (Immed & 0xFFFFFFFFFF000000ULL) + return false; + + Immed &= 0xFFFFFFULL; + return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val, + Shift); +} + +/// getShiftTypeForNode - Translate a shift node to the corresponding +/// ShiftType value. +static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) { + switch (N.getOpcode()) { + default: + return AArch64_AM::InvalidShiftExtend; + case ISD::SHL: + return AArch64_AM::LSL; + case ISD::SRL: + return AArch64_AM::LSR; + case ISD::SRA: + return AArch64_AM::ASR; + case ISD::ROTR: + return AArch64_AM::ROR; } +} +/// \brief Determine whether it is worth to fold V into an extended register. +bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const { + // it hurts if the value is used at least twice, unless we are optimizing + // for code size. + if (ForCodeSize || V.hasOneUse()) + return true; return false; } -bool -AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) { - ConstantFPSDNode *Imm = dyn_cast(N); - if (!Imm || !Imm->getValueAPF().isPosZero()) +/// SelectShiftedRegister - Select a "shifted register" operand. If the value +/// is not shifted, set the Shift operand to default of "LSL 0". 
The logical +/// instructions allow the shifted register to be rotated, but the arithmetic +/// instructions do not. The AllowROR parameter specifies whether ROR is +/// supported. +bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR, + SDValue &Reg, SDValue &Shift) { + AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N); + if (ShType == AArch64_AM::InvalidShiftExtend) + return false; + if (!AllowROR && ShType == AArch64_AM::ROR) return false; - // Doesn't actually carry any information, but keeps TableGen quiet. - Dummy = CurDAG->getTargetConstant(0, MVT::i32); - return true; + if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + unsigned BitSize = N.getValueType().getSizeInBits(); + unsigned Val = RHS->getZExtValue() & (BitSize - 1); + unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val); + + Reg = N.getOperand(0); + Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32); + return isWorthFolding(N); + } + + return false; +} + +/// getExtendTypeForNode - Translate an extend node to the corresponding +/// ExtendType value. +static AArch64_AM::ShiftExtendType +getExtendTypeForNode(SDValue N, bool IsLoadStore = false) { + if (N.getOpcode() == ISD::SIGN_EXTEND || + N.getOpcode() == ISD::SIGN_EXTEND_INREG) { + EVT SrcVT; + if (N.getOpcode() == ISD::SIGN_EXTEND_INREG) + SrcVT = cast(N.getOperand(1))->getVT(); + else + SrcVT = N.getOperand(0).getValueType(); + + if (!IsLoadStore && SrcVT == MVT::i8) + return AArch64_AM::SXTB; + else if (!IsLoadStore && SrcVT == MVT::i16) + return AArch64_AM::SXTH; + else if (SrcVT == MVT::i32) + return AArch64_AM::SXTW; + assert(SrcVT != MVT::i64 && "extend from 64-bits?"); + + return AArch64_AM::InvalidShiftExtend; + } else if (N.getOpcode() == ISD::ZERO_EXTEND || + N.getOpcode() == ISD::ANY_EXTEND) { + EVT SrcVT = N.getOperand(0).getValueType(); + if (!IsLoadStore && SrcVT == MVT::i8) + return AArch64_AM::UXTB; + else if (!IsLoadStore && SrcVT == MVT::i16) + return AArch64_AM::UXTH; + else if (SrcVT == MVT::i32) + return AArch64_AM::UXTW; + assert(SrcVT != MVT::i64 && "extend from 64-bits?"); + + return AArch64_AM::InvalidShiftExtend; + } else if (N.getOpcode() == ISD::AND) { + ConstantSDNode *CSD = dyn_cast(N.getOperand(1)); + if (!CSD) + return AArch64_AM::InvalidShiftExtend; + uint64_t AndMask = CSD->getZExtValue(); + + switch (AndMask) { + default: + return AArch64_AM::InvalidShiftExtend; + case 0xFF: + return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend; + case 0xFFFF: + return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend; + case 0xFFFFFFFF: + return AArch64_AM::UXTW; + } + } + + return AArch64_AM::InvalidShiftExtend; } -bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) { - uint32_t Bits; - uint32_t RegWidth = N.getValueType().getSizeInBits(); +// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts. 
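+// e.g. a DUPLANE16 of lane 1 fed from the high half of a v8i16 value
+// (EXTRACT_SUBVECTOR index 4) refers to lane 1 + 4 = 5 of the original
+// 128-bit register.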
+static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) { + if (DL->getOpcode() != AArch64ISD::DUPLANE16 && + DL->getOpcode() != AArch64ISD::DUPLANE32) + return false; - ConstantSDNode *CN = dyn_cast(N); - if (!CN) return false; + SDValue SV = DL->getOperand(0); + if (SV.getOpcode() != ISD::INSERT_SUBVECTOR) + return false; - if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits)) + SDValue EV = SV.getOperand(1); + if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR) return false; - Imm = CurDAG->getTargetConstant(Bits, MVT::i32); + ConstantSDNode *DLidx = cast(DL->getOperand(1).getNode()); + ConstantSDNode *EVidx = cast(EV.getOperand(1).getNode()); + LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue(); + LaneOp = EV.getOperand(0); + return true; } -SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) { - SDNode *ResNode; - SDLoc dl(Node); - EVT DestType = Node->getValueType(0); - unsigned DestWidth = DestType.getSizeInBits(); - - unsigned MOVOpcode; - EVT MOVType; - int UImm16, Shift; - uint32_t LogicalBits; - - uint64_t BitPat = cast(Node)->getZExtValue(); - if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) { - MOVType = DestType; - MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii; - } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) { - MOVType = DestType; - MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii; - } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) { - // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we can - // use a 32-bit instruction: "movn w0, 0xedbc". - MOVType = MVT::i32; - MOVOpcode = AArch64::MOVNwii; - } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) { - MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi; - uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR; - - return CurDAG->getMachineNode(MOVOpcode, dl, DestType, - CurDAG->getRegister(ZR, DestType), - CurDAG->getTargetConstant(LogicalBits, MVT::i32)); - } else { - // Can't handle it in one instruction. There's scope for permitting two (or - // more) instructions, but that'll need more thought. - return NULL; - } - - ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType, - CurDAG->getTargetConstant(UImm16, MVT::i32), - CurDAG->getTargetConstant(Shift, MVT::i32)); +// Helper for SelectOpcV64LaneV128 - Recogzine operatinos where one operand is a +// high lane extract. 
+static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp, + SDValue &LaneOp, int &LaneIdx) { - if (MOVType != DestType) { - ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl, - MVT::i64, MVT::i32, MVT::Other, - CurDAG->getTargetConstant(0, MVT::i64), - SDValue(ResNode, 0), - CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32)); + if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) { + std::swap(Op0, Op1); + if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) + return false; } - - return ResNode; + StdOp = Op1; + return true; } -SDValue -AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL, - const Constant *CV) { - EVT PtrVT = getTargetLowering()->getPointerTy(); - - switch (getTargetLowering()->getTargetMachine().getCodeModel()) { - case CodeModel::Small: { - unsigned Alignment = - getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType()); - return CurDAG->getNode( - AArch64ISD::WrapperSmall, DL, PtrVT, - CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG), - CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_LO12), - CurDAG->getConstant(Alignment, MVT::i32)); - } - case CodeModel::Large: { - SDNode *LitAddr; - LitAddr = CurDAG->getMachineNode( - AArch64::MOVZxii, DL, PtrVT, - CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3), - CurDAG->getTargetConstant(3, MVT::i32)); - LitAddr = CurDAG->getMachineNode( - AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0), - CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC), - CurDAG->getTargetConstant(2, MVT::i32)); - LitAddr = CurDAG->getMachineNode( - AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0), - CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC), - CurDAG->getTargetConstant(1, MVT::i32)); - LitAddr = CurDAG->getMachineNode( - AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0), - CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC), - CurDAG->getTargetConstant(0, MVT::i32)); - return SDValue(LitAddr, 0); +/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand +/// is a lane in the upper half of a 128-bit vector. Recognize and select this +/// so that we don't emit unnecessary lane extracts. +SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) { + SDLoc dl(N); + SDValue Op0 = N->getOperand(0); + SDValue Op1 = N->getOperand(1); + SDValue MLAOp1; // Will hold ordinary multiplicand for MLA. + SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA. + int LaneIdx = -1; // Will hold the lane index. 
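+  // e.g. (add A, (mul B, (dup Rm.h[5]))) on v4i16 can then become the single
+  // instruction "mla v0.4h, v1.4h, v2.h[5]".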
+ + if (Op1.getOpcode() != ISD::MUL || + !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2, + LaneIdx)) { + std::swap(Op0, Op1); + if (Op1.getOpcode() != ISD::MUL || + !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2, + LaneIdx)) + return nullptr; } + + SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64); + + SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal }; + + unsigned MLAOpc = ~0U; + + switch (N->getSimpleValueType(0).SimpleTy) { default: - llvm_unreachable("Only small and large code models supported now"); + llvm_unreachable("Unrecognized MLA."); + case MVT::v4i16: + MLAOpc = AArch64::MLAv4i16_indexed; + break; + case MVT::v8i16: + MLAOpc = AArch64::MLAv8i16_indexed; + break; + case MVT::v2i32: + MLAOpc = AArch64::MLAv2i32_indexed; + break; + case MVT::v4i32: + MLAOpc = AArch64::MLAv4i32_indexed; + break; } + + return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops); } -SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) { - SDLoc DL(Node); - uint64_t UnsignedVal = cast(Node)->getZExtValue(); - int64_t SignedVal = cast(Node)->getSExtValue(); - EVT DestType = Node->getValueType(0); +SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) { + SDLoc dl(N); + SDValue SMULLOp0; + SDValue SMULLOp1; + int LaneIdx; - // Since we may end up loading a 64-bit constant from a 32-bit entry the - // constant in the pool may have a different type to the eventual node. - ISD::LoadExtType Extension; - EVT MemType; + if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1, + LaneIdx)) + return nullptr; - assert((DestType == MVT::i64 || DestType == MVT::i32) - && "Only expect integer constants at the moment"); + SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64); - if (DestType == MVT::i32) { - Extension = ISD::NON_EXTLOAD; - MemType = MVT::i32; - } else if (UnsignedVal <= UINT32_MAX) { - Extension = ISD::ZEXTLOAD; - MemType = MVT::i32; - } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) { - Extension = ISD::SEXTLOAD; - MemType = MVT::i32; - } else { - Extension = ISD::NON_EXTLOAD; - MemType = MVT::i64; - } + SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal }; - Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(), - MemType.getSizeInBits()), - UnsignedVal); - SDValue PoolAddr = getConstantPoolItemAddress(DL, CV); - unsigned Alignment = - getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType()); + unsigned SMULLOpc = ~0U; - return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(), - PoolAddr, - MachinePointerInfo::getConstantPool(), MemType, - /* isVolatile = */ false, - /* isNonTemporal = */ false, - Alignment).getNode(); -} + if (IntNo == Intrinsic::aarch64_neon_smull) { + switch (N->getSimpleValueType(0).SimpleTy) { + default: + llvm_unreachable("Unrecognized SMULL."); + case MVT::v4i32: + SMULLOpc = AArch64::SMULLv4i16_indexed; + break; + case MVT::v2i64: + SMULLOpc = AArch64::SMULLv2i32_indexed; + break; + } + } else if (IntNo == Intrinsic::aarch64_neon_umull) { + switch (N->getSimpleValueType(0).SimpleTy) { + default: + llvm_unreachable("Unrecognized SMULL."); + case MVT::v4i32: + SMULLOpc = AArch64::UMULLv4i16_indexed; + break; + case MVT::v2i64: + SMULLOpc = AArch64::UMULLv2i32_indexed; + break; + } + } else + llvm_unreachable("Unrecognized intrinsic."); -SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) { - SDLoc DL(Node); - const ConstantFP *FV = cast(Node)->getConstantFPValue(); - 
EVT DestType = Node->getValueType(0); + return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops); +} - unsigned Alignment = - getTargetLowering()->getDataLayout()->getABITypeAlignment(FV->getType()); - SDValue PoolAddr = getConstantPoolItemAddress(DL, FV); +/// Instructions that accept extend modifiers like UXTW expect the register +/// being extended to be a GPR32, but the incoming DAG might be acting on a +/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if +/// this is the case. +static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) { + if (N.getValueType() == MVT::i32) + return N; - return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr, - MachinePointerInfo::getConstantPool(), - /* isVolatile = */ false, - /* isNonTemporal = */ false, - /* isInvariant = */ true, - Alignment).getNode(); + SDLoc dl(N); + SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32); + MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, + dl, MVT::i32, N, SubReg); + return SDValue(Node, 0); } -bool -AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos, - unsigned RegWidth) { - const ConstantSDNode *CN = dyn_cast(N); - if (!CN) return false; - uint64_t Val = CN->getZExtValue(); +/// SelectArithExtendedRegister - Select a "extended register" operand. This +/// operand folds in an extend followed by an optional left shift. +bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg, + SDValue &Shift) { + unsigned ShiftVal = 0; + AArch64_AM::ShiftExtendType Ext; + + if (N.getOpcode() == ISD::SHL) { + ConstantSDNode *CSD = dyn_cast(N.getOperand(1)); + if (!CSD) + return false; + ShiftVal = CSD->getZExtValue(); + if (ShiftVal > 4) + return false; - if (!isPowerOf2_64(Val)) return false; + Ext = getExtendTypeForNode(N.getOperand(0)); + if (Ext == AArch64_AM::InvalidShiftExtend) + return false; - unsigned TestedBit = Log2_64(Val); - // Checks above should have guaranteed that we haven't lost information in - // finding TestedBit, but it must still be in range. - if (TestedBit >= RegWidth) return false; + Reg = N.getOperand(0).getOperand(0); + } else { + Ext = getExtendTypeForNode(N); + if (Ext == AArch64_AM::InvalidShiftExtend) + return false; + + Reg = N.getOperand(0); + } + + // AArch64 mandates that the RHS of the operation must use the smallest + // register classs that could contain the size being extended from. Thus, + // if we're folding a (sext i8), we need the RHS to be a GPR32, even though + // there might not be an actual 32-bit value in the program. We can + // (harmlessly) synthesize one by injected an EXTRACT_SUBREG here. + assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX); + Reg = narrowIfNeeded(CurDAG, Reg); + Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N), + MVT::i32); + return isWorthFolding(N); +} + +/// If there's a use of this ADDlow that's not itself a load/store then we'll +/// need to create a real ADD instruction from it anyway and there's no point in +/// folding it into the mem op. Theoretically, it shouldn't matter, but there's +/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding +/// leads to duplaicated ADRP instructions. 
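+// e.g. folding is only a win when every user of the ADDlow is a load/store, so
+// that "adrp x8, g; add x8, x8, :lo12:g; ldr w0, [x8]" can become
+// "adrp x8, g; ldr w0, [x8, :lo12:g]".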
+static bool isWorthFoldingADDlow(SDValue N) { + for (auto Use : N->uses()) { + if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE && + Use->getOpcode() != ISD::ATOMIC_LOAD && + Use->getOpcode() != ISD::ATOMIC_STORE) + return false; + + // ldar and stlr have much more restrictive addressing modes (just a + // register). + if (cast(Use)->getOrdering() > Monotonic) + return false; + } - FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64); return true; } -SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8, - unsigned Op16,unsigned Op32, - unsigned Op64) { - // Mostly direct translation to the given operations, except that we preserve - // the AtomicOrdering for use later on. - AtomicSDNode *AN = cast(Node); - EVT VT = AN->getMemoryVT(); - - unsigned Op; - if (VT == MVT::i8) - Op = Op8; - else if (VT == MVT::i16) - Op = Op16; - else if (VT == MVT::i32) - Op = Op32; - else if (VT == MVT::i64) - Op = Op64; - else - llvm_unreachable("Unexpected atomic operation"); +/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit +/// immediate" address. The "Size" argument is the size in bytes of the memory +/// reference, which determines the scale. +bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size, + SDValue &Base, SDValue &OffImm) { + SDLoc dl(N); + const DataLayout &DL = CurDAG->getDataLayout(); + const TargetLowering *TLI = getTargetLowering(); + if (N.getOpcode() == ISD::FrameIndex) { + int FI = cast(N)->getIndex(); + Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); + OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64); + return true; + } - SmallVector Ops; - for (unsigned i = 1; i < AN->getNumOperands(); ++i) - Ops.push_back(AN->getOperand(i)); - - Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32)); - Ops.push_back(AN->getOperand(0)); // Chain moves to the end - - return CurDAG->SelectNodeTo(Node, Op, - AN->getValueType(0), MVT::Other, - &Ops[0], Ops.size()); -} - -SDNode *AArch64DAGToDAGISel::createDPairNode(SDValue V0, SDValue V1) { - SDLoc dl(V0.getNode()); - SDValue RegClass = - CurDAG->getTargetConstant(AArch64::DPairRegClassID, MVT::i32); - SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::dsub_0, MVT::i32); - SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::dsub_1, MVT::i32); - const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::v2i64, - Ops); -} - -SDNode *AArch64DAGToDAGISel::createQPairNode(SDValue V0, SDValue V1) { - SDLoc dl(V0.getNode()); - SDValue RegClass = - CurDAG->getTargetConstant(AArch64::QPairRegClassID, MVT::i32); - SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::qsub_0, MVT::i32); - SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::qsub_1, MVT::i32); - const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::v4i64, - Ops); -} - -SDNode *AArch64DAGToDAGISel::createDTripleNode(SDValue V0, SDValue V1, - SDValue V2) { - SDLoc dl(V0.getNode()); - SDValue RegClass = - CurDAG->getTargetConstant(AArch64::DTripleRegClassID, MVT::i32); - SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::dsub_0, MVT::i32); - SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::dsub_1, MVT::i32); - SDValue SubReg2 = CurDAG->getTargetConstant(AArch64::dsub_2, MVT::i32); - const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, 
- Ops); -} - -SDNode *AArch64DAGToDAGISel::createQTripleNode(SDValue V0, SDValue V1, - SDValue V2) { - SDLoc dl(V0.getNode()); - SDValue RegClass = - CurDAG->getTargetConstant(AArch64::QTripleRegClassID, MVT::i32); - SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::qsub_0, MVT::i32); - SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::qsub_1, MVT::i32); - SDValue SubReg2 = CurDAG->getTargetConstant(AArch64::qsub_2, MVT::i32); - const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, - Ops); -} - -SDNode *AArch64DAGToDAGISel::createDQuadNode(SDValue V0, SDValue V1, SDValue V2, - SDValue V3) { - SDLoc dl(V0.getNode()); - SDValue RegClass = - CurDAG->getTargetConstant(AArch64::DQuadRegClassID, MVT::i32); - SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::dsub_0, MVT::i32); - SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::dsub_1, MVT::i32); - SDValue SubReg2 = CurDAG->getTargetConstant(AArch64::dsub_2, MVT::i32); - SDValue SubReg3 = CurDAG->getTargetConstant(AArch64::dsub_3, MVT::i32); - const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, - SubReg3 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::v4i64, - Ops); -} - -SDNode *AArch64DAGToDAGISel::createQQuadNode(SDValue V0, SDValue V1, SDValue V2, - SDValue V3) { - SDLoc dl(V0.getNode()); - SDValue RegClass = - CurDAG->getTargetConstant(AArch64::QQuadRegClassID, MVT::i32); - SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::qsub_0, MVT::i32); - SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::qsub_1, MVT::i32); - SDValue SubReg2 = CurDAG->getTargetConstant(AArch64::qsub_2, MVT::i32); - SDValue SubReg3 = CurDAG->getTargetConstant(AArch64::qsub_3, MVT::i32); - const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, - SubReg3 }; - return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::v8i64, - Ops); -} - -// Get the register stride update opcode of a VLD/VST instruction that -// is otherwise equivalent to the given fixed stride updating instruction. 
-static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) { - switch (Opc) { - default: break; - case AArch64::LD1WB_8B_fixed: return AArch64::LD1WB_8B_register; - case AArch64::LD1WB_4H_fixed: return AArch64::LD1WB_4H_register; - case AArch64::LD1WB_2S_fixed: return AArch64::LD1WB_2S_register; - case AArch64::LD1WB_1D_fixed: return AArch64::LD1WB_1D_register; - case AArch64::LD1WB_16B_fixed: return AArch64::LD1WB_16B_register; - case AArch64::LD1WB_8H_fixed: return AArch64::LD1WB_8H_register; - case AArch64::LD1WB_4S_fixed: return AArch64::LD1WB_4S_register; - case AArch64::LD1WB_2D_fixed: return AArch64::LD1WB_2D_register; - - case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register; - case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register; - case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register; - case AArch64::LD1WB2V_1D_fixed: return AArch64::LD1WB2V_1D_register; - case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register; - case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register; - case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register; - case AArch64::LD2WB_2D_fixed: return AArch64::LD2WB_2D_register; - - case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register; - case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register; - case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register; - case AArch64::LD1WB3V_1D_fixed: return AArch64::LD1WB3V_1D_register; - case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register; - case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register; - case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register; - case AArch64::LD3WB_2D_fixed: return AArch64::LD3WB_2D_register; - - case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register; - case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register; - case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register; - case AArch64::LD1WB4V_1D_fixed: return AArch64::LD1WB4V_1D_register; - case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register; - case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register; - case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register; - case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register; - - case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register; - case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register; - case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register; - case AArch64::ST1WB_1D_fixed: return AArch64::ST1WB_1D_register; - case AArch64::ST1WB_16B_fixed: return AArch64::ST1WB_16B_register; - case AArch64::ST1WB_8H_fixed: return AArch64::ST1WB_8H_register; - case AArch64::ST1WB_4S_fixed: return AArch64::ST1WB_4S_register; - case AArch64::ST1WB_2D_fixed: return AArch64::ST1WB_2D_register; - - case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register; - case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register; - case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register; - case AArch64::ST1WB2V_1D_fixed: return AArch64::ST1WB2V_1D_register; - case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register; - case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register; - case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register; - case AArch64::ST2WB_2D_fixed: return AArch64::ST2WB_2D_register; - - case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register; - case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register; - case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register; 
- case AArch64::ST1WB3V_1D_fixed: return AArch64::ST1WB3V_1D_register; - case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register; - case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register; - case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register; - case AArch64::ST3WB_2D_fixed: return AArch64::ST3WB_2D_register; - - case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register; - case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register; - case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register; - case AArch64::ST1WB4V_1D_fixed: return AArch64::ST1WB4V_1D_register; - case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register; - case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register; - case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register; - case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register; - } - return Opc; // If not one we handle, return it unchanged. -} - -SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs, - bool isUpdating, - const uint16_t *Opcodes) { - assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range"); + if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) { + GlobalAddressSDNode *GAN = + dyn_cast(N.getOperand(1).getNode()); + Base = N.getOperand(0); + OffImm = N.getOperand(1); + if (!GAN) + return true; + + const GlobalValue *GV = GAN->getGlobal(); + unsigned Alignment = GV->getAlignment(); + Type *Ty = GV->getType()->getElementType(); + if (Alignment == 0 && Ty->isSized()) + Alignment = DL.getABITypeAlignment(Ty); + + if (Alignment >= Size) + return true; + } - EVT VT = N->getValueType(0); - unsigned OpcodeIndex; - switch (VT.getSimpleVT().SimpleTy) { - default: llvm_unreachable("unhandled vector load type"); - case MVT::v8i8: OpcodeIndex = 0; break; - case MVT::v4i16: OpcodeIndex = 1; break; - case MVT::v2f32: - case MVT::v2i32: OpcodeIndex = 2; break; - case MVT::v1f64: - case MVT::v1i64: OpcodeIndex = 3; break; - case MVT::v16i8: OpcodeIndex = 4; break; - case MVT::v8f16: - case MVT::v8i16: OpcodeIndex = 5; break; - case MVT::v4f32: - case MVT::v4i32: OpcodeIndex = 6; break; - case MVT::v2f64: - case MVT::v2i64: OpcodeIndex = 7; break; - } - unsigned Opc = Opcodes[OpcodeIndex]; + if (CurDAG->isBaseWithConstantOffset(N)) { + if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + int64_t RHSC = (int64_t)RHS->getZExtValue(); + unsigned Scale = Log2_32(Size); + if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) { + Base = N.getOperand(0); + if (Base.getOpcode() == ISD::FrameIndex) { + int FI = cast(Base)->getIndex(); + Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); + } + OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64); + return true; + } + } + } - SmallVector Ops; - unsigned AddrOpIdx = isUpdating ? 1 : 2; - Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address + // Before falling back to our general case, check if the unscaled + // instructions can handle this. If so, that's preferable. + if (SelectAddrModeUnscaled(N, Size, Base, OffImm)) + return false; - if (isUpdating) { - SDValue Inc = N->getOperand(AddrOpIdx + 1); - if (!isa(Inc.getNode())) // Increment in Register - Opc = getVLDSTRegisterUpdateOpcode(Opc); - Ops.push_back(Inc); + // Base only. The address will be materialized into a register before + // the memory is accessed. 
+ // add x0, Xbase, #offset + // ldr x0, [x0] + Base = N; + OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64); + return true; +} + +/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit +/// immediate" address. This should only match when there is an offset that +/// is not valid for a scaled immediate addressing mode. The "Size" argument +/// is the size in bytes of the memory reference, which is needed here to know +/// what is valid for a scaled immediate. +bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size, + SDValue &Base, + SDValue &OffImm) { + if (!CurDAG->isBaseWithConstantOffset(N)) + return false; + if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + int64_t RHSC = RHS->getSExtValue(); + // If the offset is valid as a scaled immediate, don't match here. + if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && + RHSC < (0x1000 << Log2_32(Size))) + return false; + if (RHSC >= -256 && RHSC < 256) { + Base = N.getOperand(0); + if (Base.getOpcode() == ISD::FrameIndex) { + int FI = cast(Base)->getIndex(); + const TargetLowering *TLI = getTargetLowering(); + Base = CurDAG->getTargetFrameIndex( + FI, TLI->getPointerTy(CurDAG->getDataLayout())); + } + OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64); + return true; + } } + return false; +} - Ops.push_back(N->getOperand(0)); // Push back the Chain +static SDValue Widen(SelectionDAG *CurDAG, SDValue N) { + SDLoc dl(N); + SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32); + SDValue ImpDef = SDValue( + CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0); + MachineSDNode *Node = CurDAG->getMachineNode( + TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg); + return SDValue(Node, 0); +} - std::vector ResTys; - bool is64BitVector = VT.is64BitVector(); +/// \brief Check if the given SHL node (\p N), can be used to form an +/// extended register for an addressing mode. +bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size, + bool WantExtend, SDValue &Offset, + SDValue &SignExtend) { + assert(N.getOpcode() == ISD::SHL && "Invalid opcode."); + ConstantSDNode *CSD = dyn_cast(N.getOperand(1)); + if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue()) + return false; - if (NumVecs == 1) - ResTys.push_back(VT); - else if (NumVecs == 3) - ResTys.push_back(MVT::Untyped); - else { - EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, - is64BitVector ? NumVecs : NumVecs * 2); - ResTys.push_back(ResTy); - } - - if (isUpdating) - ResTys.push_back(MVT::i64); // Type of the updated register - ResTys.push_back(MVT::Other); // Type of the Chain SDLoc dl(N); - SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); + if (WantExtend) { + AArch64_AM::ShiftExtendType Ext = + getExtendTypeForNode(N.getOperand(0), true); + if (Ext == AArch64_AM::InvalidShiftExtend) + return false; - // Transfer memoperands. - MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); - MemOp[0] = cast(N)->getMemOperand(); - cast(VLd)->setMemRefs(MemOp, MemOp + 1); + Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0)); + SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl, + MVT::i32); + } else { + Offset = N.getOperand(0); + SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32); + } - if (NumVecs == 1) - return VLd; + unsigned LegalShiftVal = Log2_32(Size); + unsigned ShiftVal = CSD->getZExtValue(); - // If NumVecs > 1, the return result is a super register containing 2-4 - // consecutive vector registers. 
- SDValue SuperReg = SDValue(VLd, 0); + if (ShiftVal != 0 && ShiftVal != LegalShiftVal) + return false; - unsigned Sub0 = is64BitVector ? AArch64::dsub_0 : AArch64::qsub_0; - for (unsigned Vec = 0; Vec < NumVecs; ++Vec) - ReplaceUses(SDValue(N, Vec), - CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg)); - // Update users of the Chain - ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1)); - if (isUpdating) - ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2)); + if (isWorthFolding(N)) + return true; - return NULL; + return false; } -SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs, - bool isUpdating, - const uint16_t *Opcodes) { - assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range"); +bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size, + SDValue &Base, SDValue &Offset, + SDValue &SignExtend, + SDValue &DoShift) { + if (N.getOpcode() != ISD::ADD) + return false; + SDValue LHS = N.getOperand(0); + SDValue RHS = N.getOperand(1); SDLoc dl(N); - MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); - MemOp[0] = cast(N)->getMemOperand(); + // We don't want to match immediate adds here, because they are better lowered + // to the register-immediate addressing modes. + if (isa(LHS) || isa(RHS)) + return false; + + // Check if this particular node is reused in any non-memory related + // operation. If yes, do not try to fold this node into the address + // computation, since the computation will be kept. + const SDNode *Node = N.getNode(); + for (SDNode *UI : Node->uses()) { + if (!isa(*UI)) + return false; + } - unsigned AddrOpIdx = isUpdating ? 1 : 2; - unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1) - EVT VT = N->getOperand(Vec0Idx).getValueType(); - unsigned OpcodeIndex; - switch (VT.getSimpleVT().SimpleTy) { - default: llvm_unreachable("unhandled vector store type"); - case MVT::v8i8: OpcodeIndex = 0; break; - case MVT::v4i16: OpcodeIndex = 1; break; - case MVT::v2f32: - case MVT::v2i32: OpcodeIndex = 2; break; - case MVT::v1f64: - case MVT::v1i64: OpcodeIndex = 3; break; - case MVT::v16i8: OpcodeIndex = 4; break; - case MVT::v8f16: - case MVT::v8i16: OpcodeIndex = 5; break; - case MVT::v4f32: - case MVT::v4i32: OpcodeIndex = 6; break; - case MVT::v2f64: - case MVT::v2i64: OpcodeIndex = 7; break; - } - unsigned Opc = Opcodes[OpcodeIndex]; - - std::vector ResTys; - if (isUpdating) - ResTys.push_back(MVT::i64); - ResTys.push_back(MVT::Other); // Type for the Chain + // Remember if it is worth folding N when it produces extended register. + bool IsExtendedRegisterWorthFolding = isWorthFolding(N); - SmallVector Ops; - Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address + // Try to match a shifted extend on the RHS. + if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL && + SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) { + Base = LHS; + DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32); + return true; + } - if (isUpdating) { - SDValue Inc = N->getOperand(AddrOpIdx + 1); - if (!isa(Inc.getNode())) // Increment in Register - Opc = getVLDSTRegisterUpdateOpcode(Opc); - Ops.push_back(Inc); + // Try to match a shifted extend on the LHS. 
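+  // e.g. (add (shl (sext w1), #3), x0) for an 8-byte access still selects the
+  // [x0, w1, sxtw #3] form, just with the roles of LHS and RHS swapped.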
+ if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL && + SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) { + Base = RHS; + DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32); + return true; } - bool is64BitVector = VT.is64BitVector(); - SDValue V0 = N->getOperand(Vec0Idx + 0); - SDValue SrcReg; - if (NumVecs == 1) - SrcReg = V0; - else { - SDValue V1 = N->getOperand(Vec0Idx + 1); - if (NumVecs == 2) - SrcReg = is64BitVector ? SDValue(createDPairNode(V0, V1), 0) - : SDValue(createQPairNode(V0, V1), 0); - else { - SDValue V2 = N->getOperand(Vec0Idx + 2); - if (NumVecs == 3) - SrcReg = is64BitVector ? SDValue(createDTripleNode(V0, V1, V2), 0) - : SDValue(createQTripleNode(V0, V1, V2), 0); - else { - SDValue V3 = N->getOperand(Vec0Idx + 3); - SrcReg = is64BitVector ? SDValue(createDQuadNode(V0, V1, V2, V3), 0) - : SDValue(createQQuadNode(V0, V1, V2, V3), 0); - } - } + // There was no shift, whatever else we find. + DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32); + + AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend; + // Try to match an unshifted extend on the LHS. + if (IsExtendedRegisterWorthFolding && + (Ext = getExtendTypeForNode(LHS, true)) != + AArch64_AM::InvalidShiftExtend) { + Base = RHS; + Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0)); + SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl, + MVT::i32); + if (isWorthFolding(LHS)) + return true; } - Ops.push_back(SrcReg); - // Push back the Chain - Ops.push_back(N->getOperand(0)); + // Try to match an unshifted extend on the RHS. + if (IsExtendedRegisterWorthFolding && + (Ext = getExtendTypeForNode(RHS, true)) != + AArch64_AM::InvalidShiftExtend) { + Base = LHS; + Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0)); + SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl, + MVT::i32); + if (isWorthFolding(RHS)) + return true; + } - // Transfer memoperands. - SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); - cast(VSt)->setMemRefs(MemOp, MemOp + 1); + return false; +} - return VSt; +// Check if the given immediate is preferred by ADD. If an immediate can be +// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and can not be +// encoded by one MOVZ, return true. +static bool isPreferredADD(int64_t ImmOff) { + // Constant in [0x0, 0xfff] can be encoded in ADD. + if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL) + return true; + // Check if it can be encoded in an "ADD LSL #12". + if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL) + // As a single MOVZ is faster than a "ADD of LSL #12", ignore such constant. + return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL && + (ImmOff & 0xffffffffffff0fffLL) != 0x0LL; + return false; } -SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) { - // Dump information about the Node being selected - DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n"); +bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size, + SDValue &Base, SDValue &Offset, + SDValue &SignExtend, + SDValue &DoShift) { + if (N.getOpcode() != ISD::ADD) + return false; + SDValue LHS = N.getOperand(0); + SDValue RHS = N.getOperand(1); + SDLoc DL(N); + + // Check if this particular node is reused in any non-memory related + // operation. If yes, do not try to fold this node into the address + // computation, since the computation will be kept. 
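+  // e.g. an (add x0, x1) whose only users are loads/stores can be folded into
+  // "ldr x2, [x0, x1]"; if the sum has any non-memory user, the ADD must be
+  // kept anyway, so folding is skipped.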
+ const SDNode *Node = N.getNode(); + for (SDNode *UI : Node->uses()) { + if (!isa(*UI)) + return false; + } - if (Node->isMachineOpcode()) { - DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n"); - Node->setNodeId(-1); - return NULL; + // Watch out if RHS is a wide immediate, it can not be selected into + // [BaseReg+Imm] addressing mode. Also it may not be able to be encoded into + // ADD/SUB. Instead it will use [BaseReg + 0] address mode and generate + // instructions like: + // MOV X0, WideImmediate + // ADD X1, BaseReg, X0 + // LDR X2, [X1, 0] + // For such situation, using [BaseReg, XReg] addressing mode can save one + // ADD/SUB: + // MOV X0, WideImmediate + // LDR X2, [BaseReg, X0] + if (isa(RHS)) { + int64_t ImmOff = (int64_t)cast(RHS)->getZExtValue(); + unsigned Scale = Log2_32(Size); + // Skip the immediate can be seleced by load/store addressing mode. + // Also skip the immediate can be encoded by a single ADD (SUB is also + // checked by using -ImmOff). + if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) || + isPreferredADD(ImmOff) || isPreferredADD(-ImmOff)) + return false; + + SDValue Ops[] = { RHS }; + SDNode *MOVI = + CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops); + SDValue MOVIV = SDValue(MOVI, 0); + // This ADD of two X register will be selected into [Reg+Reg] mode. + N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV); } - switch (Node->getOpcode()) { - case ISD::ATOMIC_LOAD_ADD: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_ADD_I8, - AArch64::ATOMIC_LOAD_ADD_I16, - AArch64::ATOMIC_LOAD_ADD_I32, - AArch64::ATOMIC_LOAD_ADD_I64); - case ISD::ATOMIC_LOAD_SUB: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_SUB_I8, - AArch64::ATOMIC_LOAD_SUB_I16, - AArch64::ATOMIC_LOAD_SUB_I32, - AArch64::ATOMIC_LOAD_SUB_I64); - case ISD::ATOMIC_LOAD_AND: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_AND_I8, - AArch64::ATOMIC_LOAD_AND_I16, - AArch64::ATOMIC_LOAD_AND_I32, - AArch64::ATOMIC_LOAD_AND_I64); - case ISD::ATOMIC_LOAD_OR: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_OR_I8, - AArch64::ATOMIC_LOAD_OR_I16, - AArch64::ATOMIC_LOAD_OR_I32, - AArch64::ATOMIC_LOAD_OR_I64); - case ISD::ATOMIC_LOAD_XOR: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_XOR_I8, - AArch64::ATOMIC_LOAD_XOR_I16, - AArch64::ATOMIC_LOAD_XOR_I32, - AArch64::ATOMIC_LOAD_XOR_I64); - case ISD::ATOMIC_LOAD_NAND: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_NAND_I8, - AArch64::ATOMIC_LOAD_NAND_I16, - AArch64::ATOMIC_LOAD_NAND_I32, - AArch64::ATOMIC_LOAD_NAND_I64); - case ISD::ATOMIC_LOAD_MIN: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_MIN_I8, - AArch64::ATOMIC_LOAD_MIN_I16, - AArch64::ATOMIC_LOAD_MIN_I32, - AArch64::ATOMIC_LOAD_MIN_I64); - case ISD::ATOMIC_LOAD_MAX: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_MAX_I8, - AArch64::ATOMIC_LOAD_MAX_I16, - AArch64::ATOMIC_LOAD_MAX_I32, - AArch64::ATOMIC_LOAD_MAX_I64); - case ISD::ATOMIC_LOAD_UMIN: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_UMIN_I8, - AArch64::ATOMIC_LOAD_UMIN_I16, - AArch64::ATOMIC_LOAD_UMIN_I32, - AArch64::ATOMIC_LOAD_UMIN_I64); - case ISD::ATOMIC_LOAD_UMAX: - return SelectAtomic(Node, - AArch64::ATOMIC_LOAD_UMAX_I8, - AArch64::ATOMIC_LOAD_UMAX_I16, - AArch64::ATOMIC_LOAD_UMAX_I32, - AArch64::ATOMIC_LOAD_UMAX_I64); - case ISD::ATOMIC_SWAP: - return SelectAtomic(Node, - AArch64::ATOMIC_SWAP_I8, - AArch64::ATOMIC_SWAP_I16, - AArch64::ATOMIC_SWAP_I32, - AArch64::ATOMIC_SWAP_I64); - case ISD::ATOMIC_CMP_SWAP: - return SelectAtomic(Node, - AArch64::ATOMIC_CMP_SWAP_I8, 
- AArch64::ATOMIC_CMP_SWAP_I16, - AArch64::ATOMIC_CMP_SWAP_I32, - AArch64::ATOMIC_CMP_SWAP_I64); - case ISD::FrameIndex: { - int FI = cast(Node)->getIndex(); - EVT PtrTy = getTargetLowering()->getPointerTy(); - SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy); - return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy, - TFI, CurDAG->getTargetConstant(0, PtrTy)); + // Remember if it is worth folding N when it produces extended register. + bool IsExtendedRegisterWorthFolding = isWorthFolding(N); + + // Try to match a shifted extend on the RHS. + if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL && + SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) { + Base = LHS; + DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32); + return true; } - case ISD::ConstantPool: { - // Constant pools are fine, just create a Target entry. - ConstantPoolSDNode *CN = cast(Node); - const Constant *C = CN->getConstVal(); - SDValue CP = CurDAG->getTargetConstantPool(C, CN->getValueType(0)); - ReplaceUses(SDValue(Node, 0), CP); - return NULL; + // Try to match a shifted extend on the LHS. + if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL && + SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) { + Base = RHS; + DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32); + return true; } - case ISD::Constant: { - SDNode *ResNode = 0; - if (cast(Node)->getZExtValue() == 0) { - // XZR and WZR are probably even better than an actual move: most of the - // time they can be folded into another instruction with *no* cost. - - EVT Ty = Node->getValueType(0); - assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type"); - uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR; - ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), - SDLoc(Node), - Register, Ty).getNode(); - } - // Next best option is a move-immediate, see if we can do that. - if (!ResNode) { - ResNode = TrySelectToMoveImm(Node); - } + // Match any non-shifted, non-extend, non-immediate add expression. + Base = LHS; + Offset = RHS; + SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32); + DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32); + // Reg1 + Reg2 is free: no check needed. + return true; +} - if (ResNode) - return ResNode; +SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef Regs) { + static const unsigned RegClassIDs[] = { + AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID}; + static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1, + AArch64::dsub2, AArch64::dsub3}; - // If even that fails we fall back to a lit-pool entry at the moment. Future - // tuning may change this to a sequence of MOVZ/MOVN/MOVK instructions. - ResNode = SelectToLitPool(Node); - assert(ResNode && "We need *some* way to materialise a constant"); + return createTuple(Regs, RegClassIDs, SubRegs); +} - // We want to continue selection at this point since the litpool access - // generated used generic nodes for simplicity. 
- ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0)); - Node = ResNode; - break; - } - case ISD::ConstantFP: { - if (A64Imms::isFPImm(cast(Node)->getValueAPF())) { - // FMOV will take care of it from TableGen - break; - } +SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef Regs) { + static const unsigned RegClassIDs[] = { + AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID}; + static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1, + AArch64::qsub2, AArch64::qsub3}; - SDNode *ResNode = LowerToFPLitPool(Node); - ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0)); + return createTuple(Regs, RegClassIDs, SubRegs); +} - // We want to continue selection at this point since the litpool access - // generated used generic nodes for simplicity. - Node = ResNode; - break; - } - case AArch64ISD::NEON_LD1_UPD: { - static const uint16_t Opcodes[] = { - AArch64::LD1WB_8B_fixed, AArch64::LD1WB_4H_fixed, - AArch64::LD1WB_2S_fixed, AArch64::LD1WB_1D_fixed, - AArch64::LD1WB_16B_fixed, AArch64::LD1WB_8H_fixed, - AArch64::LD1WB_4S_fixed, AArch64::LD1WB_2D_fixed - }; - return SelectVLD(Node, 1, true, Opcodes); - } - case AArch64ISD::NEON_LD2_UPD: { - static const uint16_t Opcodes[] = { - AArch64::LD2WB_8B_fixed, AArch64::LD2WB_4H_fixed, - AArch64::LD2WB_2S_fixed, AArch64::LD1WB2V_1D_fixed, - AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed, - AArch64::LD2WB_4S_fixed, AArch64::LD2WB_2D_fixed - }; - return SelectVLD(Node, 2, true, Opcodes); - } - case AArch64ISD::NEON_LD3_UPD: { - static const uint16_t Opcodes[] = { - AArch64::LD3WB_8B_fixed, AArch64::LD3WB_4H_fixed, - AArch64::LD3WB_2S_fixed, AArch64::LD1WB3V_1D_fixed, - AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed, - AArch64::LD3WB_4S_fixed, AArch64::LD3WB_2D_fixed - }; - return SelectVLD(Node, 3, true, Opcodes); - } - case AArch64ISD::NEON_LD4_UPD: { - static const uint16_t Opcodes[] = { - AArch64::LD4WB_8B_fixed, AArch64::LD4WB_4H_fixed, - AArch64::LD4WB_2S_fixed, AArch64::LD1WB4V_1D_fixed, - AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed, - AArch64::LD4WB_4S_fixed, AArch64::LD4WB_2D_fixed - }; - return SelectVLD(Node, 4, true, Opcodes); - } - case AArch64ISD::NEON_ST1_UPD: { - static const uint16_t Opcodes[] = { - AArch64::ST1WB_8B_fixed, AArch64::ST1WB_4H_fixed, - AArch64::ST1WB_2S_fixed, AArch64::ST1WB_1D_fixed, - AArch64::ST1WB_16B_fixed, AArch64::ST1WB_8H_fixed, - AArch64::ST1WB_4S_fixed, AArch64::ST1WB_2D_fixed - }; - return SelectVST(Node, 1, true, Opcodes); - } - case AArch64ISD::NEON_ST2_UPD: { - static const uint16_t Opcodes[] = { - AArch64::ST2WB_8B_fixed, AArch64::ST2WB_4H_fixed, - AArch64::ST2WB_2S_fixed, AArch64::ST1WB2V_1D_fixed, - AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed, - AArch64::ST2WB_4S_fixed, AArch64::ST2WB_2D_fixed - }; - return SelectVST(Node, 2, true, Opcodes); - } - case AArch64ISD::NEON_ST3_UPD: { - static const uint16_t Opcodes[] = { - AArch64::ST3WB_8B_fixed, AArch64::ST3WB_4H_fixed, - AArch64::ST3WB_2S_fixed, AArch64::ST1WB3V_1D_fixed, - AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed, - AArch64::ST3WB_4S_fixed, AArch64::ST3WB_2D_fixed - }; - return SelectVST(Node, 3, true, Opcodes); - } - case AArch64ISD::NEON_ST4_UPD: { - static const uint16_t Opcodes[] = { - AArch64::ST4WB_8B_fixed, AArch64::ST4WB_4H_fixed, - AArch64::ST4WB_2S_fixed, AArch64::ST1WB4V_1D_fixed, - AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed, - AArch64::ST4WB_4S_fixed, AArch64::ST4WB_2D_fixed - }; - return SelectVST(Node, 4, true, Opcodes); - } - case ISD::INTRINSIC_VOID: - case ISD::INTRINSIC_W_CHAIN: { 
- unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); - switch (IntNo) { - default: - break; +SDValue AArch64DAGToDAGISel::createTuple(ArrayRef Regs, + const unsigned RegClassIDs[], + const unsigned SubRegs[]) { + // There's no special register-class for a vector-list of 1 element: it's just + // a vector. + if (Regs.size() == 1) + return Regs[0]; - case Intrinsic::arm_neon_vld1: { - static const uint16_t Opcodes[] = { AArch64::LD1_8B, AArch64::LD1_4H, - AArch64::LD1_2S, AArch64::LD1_1D, - AArch64::LD1_16B, AArch64::LD1_8H, - AArch64::LD1_4S, AArch64::LD1_2D }; - return SelectVLD(Node, 1, false, Opcodes); - } - case Intrinsic::arm_neon_vld2: { - static const uint16_t Opcodes[] = { AArch64::LD2_8B, AArch64::LD2_4H, - AArch64::LD2_2S, AArch64::LD1_2V_1D, - AArch64::LD2_16B, AArch64::LD2_8H, - AArch64::LD2_4S, AArch64::LD2_2D }; - return SelectVLD(Node, 2, false, Opcodes); - } - case Intrinsic::arm_neon_vld3: { - static const uint16_t Opcodes[] = { AArch64::LD3_8B, AArch64::LD3_4H, - AArch64::LD3_2S, AArch64::LD1_3V_1D, - AArch64::LD3_16B, AArch64::LD3_8H, - AArch64::LD3_4S, AArch64::LD3_2D }; - return SelectVLD(Node, 3, false, Opcodes); - } - case Intrinsic::arm_neon_vld4: { - static const uint16_t Opcodes[] = { AArch64::LD4_8B, AArch64::LD4_4H, - AArch64::LD4_2S, AArch64::LD1_4V_1D, - AArch64::LD4_16B, AArch64::LD4_8H, - AArch64::LD4_4S, AArch64::LD4_2D }; - return SelectVLD(Node, 4, false, Opcodes); - } - case Intrinsic::arm_neon_vst1: { - static const uint16_t Opcodes[] = { AArch64::ST1_8B, AArch64::ST1_4H, - AArch64::ST1_2S, AArch64::ST1_1D, - AArch64::ST1_16B, AArch64::ST1_8H, - AArch64::ST1_4S, AArch64::ST1_2D }; - return SelectVST(Node, 1, false, Opcodes); - } - case Intrinsic::arm_neon_vst2: { - static const uint16_t Opcodes[] = { AArch64::ST2_8B, AArch64::ST2_4H, - AArch64::ST2_2S, AArch64::ST1_2V_1D, - AArch64::ST2_16B, AArch64::ST2_8H, - AArch64::ST2_4S, AArch64::ST2_2D }; - return SelectVST(Node, 2, false, Opcodes); - } - case Intrinsic::arm_neon_vst3: { - static const uint16_t Opcodes[] = { AArch64::ST3_8B, AArch64::ST3_4H, - AArch64::ST3_2S, AArch64::ST1_3V_1D, - AArch64::ST3_16B, AArch64::ST3_8H, - AArch64::ST3_4S, AArch64::ST3_2D }; - return SelectVST(Node, 3, false, Opcodes); - } - case Intrinsic::arm_neon_vst4: { - static const uint16_t Opcodes[] = { AArch64::ST4_8B, AArch64::ST4_4H, - AArch64::ST4_2S, AArch64::ST1_4V_1D, - AArch64::ST4_16B, AArch64::ST4_8H, - AArch64::ST4_4S, AArch64::ST4_2D }; - return SelectVST(Node, 4, false, Opcodes); - } - } - break; - } - default: - break; // Let generic code handle it - } + assert(Regs.size() >= 2 && Regs.size() <= 4); - SDNode *ResNode = SelectCode(Node); + SDLoc DL(Regs[0]); + + SmallVector Ops; - DEBUG(dbgs() << "=> "; - if (ResNode == NULL || ResNode == Node) - Node->dump(CurDAG); - else - ResNode->dump(CurDAG); - dbgs() << "\n"); + // First operand of REG_SEQUENCE is the desired RegClass. + Ops.push_back( + CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32)); + + // Then we get pairs of source & subregister-position for the components. 
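A standalone sketch (not from this patch) of the operand list createTuple produces, using hypothetical register names: REG_SEQUENCE takes the target register-class ID followed by interleaved (source register, subregister position) pairs, exactly as the loop that follows builds them.

#include <cstdio>
#include <string>
#include <vector>

// Conceptual REG_SEQUENCE operands for a two-element D tuple:
//   { DDRegClassID, D0, dsub0, D1, dsub1 }
int main() {
  const char *Regs[] = {"D0", "D1"};          // hypothetical input vectors
  const char *SubRegs[] = {"dsub0", "dsub1"}; // positions inside the tuple
  std::vector<std::string> Ops;
  Ops.push_back("DDRegClassID");              // RegClassIDs[Regs.size() - 2]
  for (unsigned i = 0; i < 2; ++i) {
    Ops.push_back(Regs[i]);                   // source register
    Ops.push_back(SubRegs[i]);                // where it sits in the tuple
  }
  for (const std::string &Op : Ops)
    std::printf("%s ", Op.c_str());           // DDRegClassID D0 dsub0 D1 dsub1
  std::printf("\n");
  return 0;
}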
+ for (unsigned i = 0; i < Regs.size(); ++i) { + Ops.push_back(Regs[i]); + Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32)); + } + + SDNode *N = + CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops); + return SDValue(N, 0); +} + +SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, + unsigned Opc, bool isExt) { + SDLoc dl(N); + EVT VT = N->getValueType(0); + + unsigned ExtOff = isExt; + + // Form a REG_SEQUENCE to force register allocation. + unsigned Vec0Off = ExtOff + 1; + SmallVector Regs(N->op_begin() + Vec0Off, + N->op_begin() + Vec0Off + NumVecs); + SDValue RegSeq = createQTuple(Regs); + + SmallVector Ops; + if (isExt) + Ops.push_back(N->getOperand(1)); + Ops.push_back(RegSeq); + Ops.push_back(N->getOperand(NumVecs + ExtOff + 1)); + return CurDAG->getMachineNode(Opc, dl, VT, Ops); +} + +SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) { + LoadSDNode *LD = cast(N); + if (LD->isUnindexed()) + return nullptr; + EVT VT = LD->getMemoryVT(); + EVT DstVT = N->getValueType(0); + ISD::MemIndexedMode AM = LD->getAddressingMode(); + bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC; + + // We're not doing validity checking here. That was done when checking + // if we should mark the load as indexed or not. We're just selecting + // the right instruction. + unsigned Opcode = 0; + + ISD::LoadExtType ExtType = LD->getExtensionType(); + bool InsertTo64 = false; + if (VT == MVT::i64) + Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost; + else if (VT == MVT::i32) { + if (ExtType == ISD::NON_EXTLOAD) + Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost; + else if (ExtType == ISD::SEXTLOAD) + Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost; + else { + Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost; + InsertTo64 = true; + // The result of the load is only i32. It's the subreg_to_reg that makes + // it into an i64. + DstVT = MVT::i32; + } + } else if (VT == MVT::i16) { + if (ExtType == ISD::SEXTLOAD) { + if (DstVT == MVT::i64) + Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost; + else + Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost; + } else { + Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost; + InsertTo64 = DstVT == MVT::i64; + // The result of the load is only i32. It's the subreg_to_reg that makes + // it into an i64. + DstVT = MVT::i32; + } + } else if (VT == MVT::i8) { + if (ExtType == ISD::SEXTLOAD) { + if (DstVT == MVT::i64) + Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost; + else + Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost; + } else { + Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost; + InsertTo64 = DstVT == MVT::i64; + // The result of the load is only i32. It's the subreg_to_reg that makes + // it into an i64. + DstVT = MVT::i32; + } + } else if (VT == MVT::f16) { + Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost; + } else if (VT == MVT::f32) { + Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost; + } else if (VT == MVT::f64 || VT.is64BitVector()) { + Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost; + } else if (VT.is128BitVector()) { + Opcode = IsPre ? 
AArch64::LDRQpre : AArch64::LDRQpost; + } else + return nullptr; + SDValue Chain = LD->getChain(); + SDValue Base = LD->getBasePtr(); + ConstantSDNode *OffsetOp = cast(LD->getOffset()); + int OffsetVal = (int)OffsetOp->getZExtValue(); + SDLoc dl(N); + SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64); + SDValue Ops[] = { Base, Offset, Chain }; + SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT, + MVT::Other, Ops); + // Either way, we're replacing the node, so tell the caller that. + Done = true; + SDValue LoadedVal = SDValue(Res, 1); + if (InsertTo64) { + SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32); + LoadedVal = + SDValue(CurDAG->getMachineNode( + AArch64::SUBREG_TO_REG, dl, MVT::i64, + CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal, + SubReg), + 0); + } + + ReplaceUses(SDValue(N, 0), LoadedVal); + ReplaceUses(SDValue(N, 1), SDValue(Res, 0)); + ReplaceUses(SDValue(N, 2), SDValue(Res, 2)); + + return nullptr; +} + +SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, + unsigned Opc, unsigned SubRegIdx) { + SDLoc dl(N); + EVT VT = N->getValueType(0); + SDValue Chain = N->getOperand(0); + + SDValue Ops[] = {N->getOperand(2), // Mem operand; + Chain}; + + const EVT ResTys[] = {MVT::Untyped, MVT::Other}; + + SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); + SDValue SuperReg = SDValue(Ld, 0); + for (unsigned i = 0; i < NumVecs; ++i) + ReplaceUses(SDValue(N, i), + CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg)); + + ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1)); + return nullptr; +} + +SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs, + unsigned Opc, unsigned SubRegIdx) { + SDLoc dl(N); + EVT VT = N->getValueType(0); + SDValue Chain = N->getOperand(0); + + SDValue Ops[] = {N->getOperand(1), // Mem operand + N->getOperand(2), // Incremental + Chain}; + + const EVT ResTys[] = {MVT::i64, // Type of the write back register + MVT::Untyped, MVT::Other}; + + SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); + + // Update uses of write back register + ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0)); + + // Update uses of vector list + SDValue SuperReg = SDValue(Ld, 1); + if (NumVecs == 1) + ReplaceUses(SDValue(N, 0), SuperReg); + else + for (unsigned i = 0; i < NumVecs; ++i) + ReplaceUses(SDValue(N, i), + CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg)); + + // Update the chain + ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2)); + return nullptr; +} + +SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs, + unsigned Opc) { + SDLoc dl(N); + EVT VT = N->getOperand(2)->getValueType(0); + + // Form a REG_SEQUENCE to force register allocation. + bool Is128Bit = VT.getSizeInBits() == 128; + SmallVector Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs); + SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs); + + SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)}; + SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops); + + return St; +} + +SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs, + unsigned Opc) { + SDLoc dl(N); + EVT VT = N->getOperand(2)->getValueType(0); + const EVT ResTys[] = {MVT::i64, // Type of the write back register + MVT::Other}; // Type for the Chain + + // Form a REG_SEQUENCE to force register allocation. 
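As a reminder of what the pre-/post-indexed opcode pairs selected above implement, here is a small stand-alone model in plain C++ (not LLVM code) of write-back addressing; the extra i64 result of the machine node corresponds to the updated base pointer.

#include <cassert>
#include <cstdint>

// Post-indexed (e.g. LDRXpost): load from the current base, then advance it.
static uint64_t loadPost(const uint64_t *&Base, int64_t Offset) {
  uint64_t V = *Base;
  Base = reinterpret_cast<const uint64_t *>(
      reinterpret_cast<const char *>(Base) + Offset);
  return V;
}

// Pre-indexed (e.g. LDRXpre): advance the base first, then load from it.
static uint64_t loadPre(const uint64_t *&Base, int64_t Offset) {
  Base = reinterpret_cast<const uint64_t *>(
      reinterpret_cast<const char *>(Base) + Offset);
  return *Base;
}

int main() {
  uint64_t Buf[2] = {1, 2};
  const uint64_t *P = Buf;
  assert(loadPost(P, 8) == 1 && P == &Buf[1]);  // LDR Xt, [Xn], #8
  P = Buf;
  assert(loadPre(P, 8) == 2 && P == &Buf[1]);   // LDR Xt, [Xn, #8]!
  return 0;
}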
+ bool Is128Bit = VT.getSizeInBits() == 128; + SmallVector Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs); + SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs); + + SDValue Ops[] = {RegSeq, + N->getOperand(NumVecs + 1), // base register + N->getOperand(NumVecs + 2), // Incremental + N->getOperand(0)}; // Chain + SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); + + return St; +} + +namespace { +/// WidenVector - Given a value in the V64 register class, produce the +/// equivalent value in the V128 register class. +class WidenVector { + SelectionDAG &DAG; + +public: + WidenVector(SelectionDAG &DAG) : DAG(DAG) {} + + SDValue operator()(SDValue V64Reg) { + EVT VT = V64Reg.getValueType(); + unsigned NarrowSize = VT.getVectorNumElements(); + MVT EltTy = VT.getVectorElementType().getSimpleVT(); + MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize); + SDLoc DL(V64Reg); + + SDValue Undef = + SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0); + return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg); + } +}; +} // namespace + +/// NarrowVector - Given a value in the V128 register class, produce the +/// equivalent value in the V64 register class. +static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) { + EVT VT = V128Reg.getValueType(); + unsigned WideSize = VT.getVectorNumElements(); + MVT EltTy = VT.getVectorElementType().getSimpleVT(); + MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2); + + return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy, + V128Reg); +} + +SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs, + unsigned Opc) { + SDLoc dl(N); + EVT VT = N->getValueType(0); + bool Narrow = VT.getSizeInBits() == 64; + + // Form a REG_SEQUENCE to force register allocation. + SmallVector Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs); + + if (Narrow) + std::transform(Regs.begin(), Regs.end(), Regs.begin(), + WidenVector(*CurDAG)); + + SDValue RegSeq = createQTuple(Regs); + + const EVT ResTys[] = {MVT::Untyped, MVT::Other}; + + unsigned LaneNo = + cast(N->getOperand(NumVecs + 2))->getZExtValue(); + + SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64), + N->getOperand(NumVecs + 3), N->getOperand(0)}; + SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); + SDValue SuperReg = SDValue(Ld, 0); + + EVT WideVT = RegSeq.getOperand(1)->getValueType(0); + static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2, + AArch64::qsub3 }; + for (unsigned i = 0; i < NumVecs; ++i) { + SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg); + if (Narrow) + NV = NarrowVector(NV, *CurDAG); + ReplaceUses(SDValue(N, i), NV); + } + + ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1)); + + return Ld; +} + +SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs, + unsigned Opc) { + SDLoc dl(N); + EVT VT = N->getValueType(0); + bool Narrow = VT.getSizeInBits() == 64; + + // Form a REG_SEQUENCE to force register allocation. 
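A minimal sketch, assuming the usual little-endian AArch64 layout, of the invariant WidenVector/NarrowVector rely on: a 64-bit vector value occupies the dsub (low) half of the corresponding 128-bit Q register, so widening and narrowing are pure subregister operations with no data movement.

#include <cassert>
#include <cstdint>

struct QReg { uint64_t Lo, Hi; };          // stand-in for a 128-bit V register

// WidenVector: the D value becomes the dsub (low) half; the high half is
// undefined in the real thing, zero in this model.
static QReg widen(uint64_t D) { return {D, 0}; }

// NarrowVector: read the dsub half back out.
static uint64_t narrow(const QReg &Q) { return Q.Lo; }

int main() {
  uint64_t D = 0x0123456789abcdefULL;
  assert(narrow(widen(D)) == D);           // round-trips losslessly
  return 0;
}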
+ SmallVector Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs); + + if (Narrow) + std::transform(Regs.begin(), Regs.end(), Regs.begin(), + WidenVector(*CurDAG)); + + SDValue RegSeq = createQTuple(Regs); + + const EVT ResTys[] = {MVT::i64, // Type of the write back register + RegSeq->getValueType(0), MVT::Other}; + + unsigned LaneNo = + cast(N->getOperand(NumVecs + 1))->getZExtValue(); + + SDValue Ops[] = {RegSeq, + CurDAG->getTargetConstant(LaneNo, dl, + MVT::i64), // Lane Number + N->getOperand(NumVecs + 2), // Base register + N->getOperand(NumVecs + 3), // Incremental + N->getOperand(0)}; + SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); + + // Update uses of the write back register + ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0)); + + // Update uses of the vector list + SDValue SuperReg = SDValue(Ld, 1); + if (NumVecs == 1) { + ReplaceUses(SDValue(N, 0), + Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg); + } else { + EVT WideVT = RegSeq.getOperand(1)->getValueType(0); + static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2, + AArch64::qsub3 }; + for (unsigned i = 0; i < NumVecs; ++i) { + SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, + SuperReg); + if (Narrow) + NV = NarrowVector(NV, *CurDAG); + ReplaceUses(SDValue(N, i), NV); + } + } + + // Update the Chain + ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2)); + + return Ld; +} + +SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs, + unsigned Opc) { + SDLoc dl(N); + EVT VT = N->getOperand(2)->getValueType(0); + bool Narrow = VT.getSizeInBits() == 64; + + // Form a REG_SEQUENCE to force register allocation. + SmallVector Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs); + + if (Narrow) + std::transform(Regs.begin(), Regs.end(), Regs.begin(), + WidenVector(*CurDAG)); + + SDValue RegSeq = createQTuple(Regs); + + unsigned LaneNo = + cast(N->getOperand(NumVecs + 2))->getZExtValue(); + + SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64), + N->getOperand(NumVecs + 3), N->getOperand(0)}; + SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops); + + // Transfer memoperands. + MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); + MemOp[0] = cast(N)->getMemOperand(); + cast(St)->setMemRefs(MemOp, MemOp + 1); + + return St; +} + +SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs, + unsigned Opc) { + SDLoc dl(N); + EVT VT = N->getOperand(2)->getValueType(0); + bool Narrow = VT.getSizeInBits() == 64; + + // Form a REG_SEQUENCE to force register allocation. + SmallVector Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs); + + if (Narrow) + std::transform(Regs.begin(), Regs.end(), Regs.begin(), + WidenVector(*CurDAG)); + + SDValue RegSeq = createQTuple(Regs); + + const EVT ResTys[] = {MVT::i64, // Type of the write back register + MVT::Other}; + + unsigned LaneNo = + cast(N->getOperand(NumVecs + 1))->getZExtValue(); + + SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64), + N->getOperand(NumVecs + 2), // Base Register + N->getOperand(NumVecs + 3), // Incremental + N->getOperand(0)}; + SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); + + // Transfer memoperands. 
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); + MemOp[0] = cast(N)->getMemOperand(); + cast(St)->setMemRefs(MemOp, MemOp + 1); + + return St; +} + +static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N, + unsigned &Opc, SDValue &Opd0, + unsigned &LSB, unsigned &MSB, + unsigned NumberOfIgnoredLowBits, + bool BiggerPattern) { + assert(N->getOpcode() == ISD::AND && + "N must be a AND operation to call this function"); + + EVT VT = N->getValueType(0); + + // Here we can test the type of VT and return false when the type does not + // match, but since it is done prior to that call in the current context + // we turned that into an assert to avoid redundant code. + assert((VT == MVT::i32 || VT == MVT::i64) && + "Type checking must have been done before calling this function"); + + // FIXME: simplify-demanded-bits in DAGCombine will probably have + // changed the AND node to a 32-bit mask operation. We'll have to + // undo that as part of the transform here if we want to catch all + // the opportunities. + // Currently the NumberOfIgnoredLowBits argument helps to recover + // form these situations when matching bigger pattern (bitfield insert). + + // For unsigned extracts, check for a shift right and mask + uint64_t And_imm = 0; + if (!isOpcWithIntImmediate(N, ISD::AND, And_imm)) + return false; + + const SDNode *Op0 = N->getOperand(0).getNode(); + + // Because of simplify-demanded-bits in DAGCombine, the mask may have been + // simplified. Try to undo that + And_imm |= (1 << NumberOfIgnoredLowBits) - 1; + + // The immediate is a mask of the low bits iff imm & (imm+1) == 0 + if (And_imm & (And_imm + 1)) + return false; + + bool ClampMSB = false; + uint64_t Srl_imm = 0; + // Handle the SRL + ANY_EXTEND case. + if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND && + isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) { + // Extend the incoming operand of the SRL to 64-bit. + Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0)); + // Make sure to clamp the MSB so that we preserve the semantics of the + // original operations. + ClampMSB = true; + } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE && + isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, + Srl_imm)) { + // If the shift result was truncated, we can still combine them. + Opd0 = Op0->getOperand(0).getOperand(0); + + // Use the type of SRL node. + VT = Opd0->getValueType(0); + } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) { + Opd0 = Op0->getOperand(0); + } else if (BiggerPattern) { + // Let's pretend a 0 shift right has been performed. + // The resulting code will be at least as good as the original one + // plus it may expose more opportunities for bitfield insert pattern. + // FIXME: Currently we limit this to the bigger pattern, because + // some optimizations expect AND and not UBFM + Opd0 = N->getOperand(0); + } else + return false; + + // Bail out on large immediates. This happens when no proper + // combining/constant folding was performed. + if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) { + DEBUG((dbgs() << N + << ": Found large shift immediate, this should not happen\n")); + return false; + } + + LSB = Srl_imm; + MSB = Srl_imm + (VT == MVT::i32 ? 
countTrailingOnes(And_imm) + : countTrailingOnes(And_imm)) - + 1; + if (ClampMSB) + // Since we're moving the extend before the right shift operation, we need + // to clamp the MSB to make sure we don't shift in undefined bits instead of + // the zeros which would get shifted in with the original right shift + // operation. + MSB = MSB > 31 ? 31 : MSB; + + Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri; + return true; +} + +static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc, + SDValue &Opd0, unsigned &LSB, + unsigned &MSB) { + // We are looking for the following pattern which basically extracts several + // continuous bits from the source value and places it from the LSB of the + // destination value, all other bits of the destination value or set to zero: + // + // Value2 = AND Value, MaskImm + // SRL Value2, ShiftImm + // + // with MaskImm >> ShiftImm to search for the bit width. + // + // This gets selected into a single UBFM: + // + // UBFM Value, ShiftImm, BitWide + Srl_imm -1 + // + + if (N->getOpcode() != ISD::SRL) + return false; + + uint64_t And_mask = 0; + if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask)) + return false; + + Opd0 = N->getOperand(0).getOperand(0); + + uint64_t Srl_imm = 0; + if (!isIntImmediate(N->getOperand(1), Srl_imm)) + return false; + + // Check whether we really have several bits extract here. + unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm)); + if (BitWide && isMask_64(And_mask >> Srl_imm)) { + if (N->getValueType(0) == MVT::i32) + Opc = AArch64::UBFMWri; + else + Opc = AArch64::UBFMXri; + + LSB = Srl_imm; + MSB = BitWide + Srl_imm - 1; + return true; + } + + return false; +} + +static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0, + unsigned &Immr, unsigned &Imms, + bool BiggerPattern) { + assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) && + "N must be a SHR/SRA operation to call this function"); + + EVT VT = N->getValueType(0); + + // Here we can test the type of VT and return false when the type does not + // match, but since it is done prior to that call in the current context + // we turned that into an assert to avoid redundant code. + assert((VT == MVT::i32 || VT == MVT::i64) && + "Type checking must have been done before calling this function"); + + // Check for AND + SRL doing several bits extract. + if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms)) + return true; + + // we're looking for a shift of a shift + uint64_t Shl_imm = 0; + uint64_t Trunc_bits = 0; + if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) { + Opd0 = N->getOperand(0).getOperand(0); + } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL && + N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) { + // We are looking for a shift of truncate. Truncate from i64 to i32 could + // be considered as setting high 32 bits as zero. Our strategy here is to + // always generate 64bit UBFM. This consistency will help the CSE pass + // later find more redundancy. + Opd0 = N->getOperand(0).getOperand(0); + Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits(); + VT = Opd0->getValueType(0); + assert(VT == MVT::i64 && "the promoted type should be i64"); + } else if (BiggerPattern) { + // Let's pretend a 0 shift left has been performed. 
+ // FIXME: Currently we limit this to the bigger pattern case, + // because some optimizations expect AND and not UBFM + Opd0 = N->getOperand(0); + } else + return false; + + // Missing combines/constant folding may have left us with strange + // constants. + if (Shl_imm >= VT.getSizeInBits()) { + DEBUG((dbgs() << N + << ": Found large shift immediate, this should not happen\n")); + return false; + } + + uint64_t Srl_imm = 0; + if (!isIntImmediate(N->getOperand(1), Srl_imm)) + return false; + + assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() && + "bad amount in shift node!"); + int immr = Srl_imm - Shl_imm; + Immr = immr < 0 ? immr + VT.getSizeInBits() : immr; + Imms = VT.getSizeInBits() - Shl_imm - Trunc_bits - 1; + // SRA requires a signed extraction + if (VT == MVT::i32) + Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri; + else + Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri; + return true; +} + +static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc, + SDValue &Opd0, unsigned &Immr, unsigned &Imms, + unsigned NumberOfIgnoredLowBits = 0, + bool BiggerPattern = false) { + if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64) + return false; + + switch (N->getOpcode()) { + default: + if (!N->isMachineOpcode()) + return false; + break; + case ISD::AND: + return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms, + NumberOfIgnoredLowBits, BiggerPattern); + case ISD::SRL: + case ISD::SRA: + return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern); + } + + unsigned NOpc = N->getMachineOpcode(); + switch (NOpc) { + default: + return false; + case AArch64::SBFMWri: + case AArch64::UBFMWri: + case AArch64::SBFMXri: + case AArch64::UBFMXri: + Opc = NOpc; + Opd0 = N->getOperand(0); + Immr = cast(N->getOperand(1).getNode())->getZExtValue(); + Imms = cast(N->getOperand(2).getNode())->getZExtValue(); + return true; + } + // Unreachable + return false; +} + +SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) { + unsigned Opc, Immr, Imms; + SDValue Opd0; + if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms)) + return nullptr; + + EVT VT = N->getValueType(0); + SDLoc dl(N); + + // If the bit extract operation is 64bit but the original type is 32bit, we + // need to add one EXTRACT_SUBREG. + if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) { + SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64), + CurDAG->getTargetConstant(Imms, dl, MVT::i64)}; + + SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64); + SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32); + MachineSDNode *Node = + CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32, + SDValue(BFM, 0), SubReg); + return Node; + } + + SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT), + CurDAG->getTargetConstant(Imms, dl, VT)}; + return CurDAG->SelectNodeTo(N, Opc, VT, Ops); +} + +/// Does DstMask form a complementary pair with the mask provided by +/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking, +/// this asks whether DstMask zeroes precisely those bits that will be set by +/// the other half. 
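To make the UBFM immediates computed above concrete, here is a stand-alone emulation of the "extract" form (imms >= immr) together with the LSB/MSB values derived for an AND+SRL pattern; the sample values are made up for illustration.

#include <cassert>
#include <cstdint>

// UBFM Xd, Xn, #immr, #imms with imms >= immr: take bits [imms:immr] of the
// source and place them at the bottom of the destination, zeroing the rest.
static uint64_t ubfmExtract(uint64_t Src, unsigned ImmR, unsigned ImmS) {
  unsigned Width = ImmS - ImmR + 1;
  uint64_t Mask = Width == 64 ? ~0ULL : ((1ULL << Width) - 1);
  return (Src >> ImmR) & Mask;
}

int main() {
  uint64_t X = 0xDEADBEEFCAFEF00DULL;
  // (x >> 4) & 0xff: LSB = 4, MSB = 4 + countTrailingOnes(0xff) - 1 = 11.
  assert(ubfmExtract(X, /*ImmR=*/4, /*ImmS=*/11) == ((X >> 4) & 0xff));
  // A plain LSR #7 is UBFM #7, #63 (cf. getLeftShift further down).
  assert(ubfmExtract(X, 7, 63) == (X >> 7));
  return 0;
}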
+static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted, + unsigned NumberOfIgnoredHighBits, EVT VT) { + assert((VT == MVT::i32 || VT == MVT::i64) && + "i32 or i64 mask type expected!"); + unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits; + + APInt SignificantDstMask = APInt(BitWidth, DstMask); + APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth); + + return (SignificantDstMask & SignificantBitsToBeInserted) == 0 && + (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue(); +} + +// Look for bits that will be useful for later uses. +// A bit is consider useless as soon as it is dropped and never used +// before it as been dropped. +// E.g., looking for useful bit of x +// 1. y = x & 0x7 +// 2. z = y >> 2 +// After #1, x useful bits are 0x7, then the useful bits of x, live through +// y. +// After #2, the useful bits of x are 0x4. +// However, if x is used on an unpredicatable instruction, then all its bits +// are useful. +// E.g. +// 1. y = x & 0x7 +// 2. z = y >> 2 +// 3. str x, [@x] +static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0); + +static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits, + unsigned Depth) { + uint64_t Imm = + cast(Op.getOperand(1).getNode())->getZExtValue(); + Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth()); + UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm); + getUsefulBits(Op, UsefulBits, Depth + 1); +} + +static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits, + uint64_t Imm, uint64_t MSB, + unsigned Depth) { + // inherit the bitwidth value + APInt OpUsefulBits(UsefulBits); + OpUsefulBits = 1; + + if (MSB >= Imm) { + OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1); + --OpUsefulBits; + // The interesting part will be in the lower part of the result + getUsefulBits(Op, OpUsefulBits, Depth + 1); + // The interesting part was starting at Imm in the argument + OpUsefulBits = OpUsefulBits.shl(Imm); + } else { + OpUsefulBits = OpUsefulBits.shl(MSB + 1); + --OpUsefulBits; + // The interesting part will be shifted in the result + OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm); + getUsefulBits(Op, OpUsefulBits, Depth + 1); + // The interesting part was at zero in the argument + OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm); + } + + UsefulBits &= OpUsefulBits; +} + +static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits, + unsigned Depth) { + uint64_t Imm = + cast(Op.getOperand(1).getNode())->getZExtValue(); + uint64_t MSB = + cast(Op.getOperand(2).getNode())->getZExtValue(); + + getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth); +} + +static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits, + unsigned Depth) { + uint64_t ShiftTypeAndValue = + cast(Op.getOperand(2).getNode())->getZExtValue(); + APInt Mask(UsefulBits); + Mask.clearAllBits(); + Mask.flipAllBits(); + + if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) { + // Shift Left + uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue); + Mask = Mask.shl(ShiftAmt); + getUsefulBits(Op, Mask, Depth + 1); + Mask = Mask.lshr(ShiftAmt); + } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) { + // Shift Right + // We do not handle AArch64_AM::ASR, because the sign will change the + // number of useful bits + uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue); + Mask = Mask.lshr(ShiftAmt); + getUsefulBits(Op, Mask, Depth + 
1); + Mask = Mask.shl(ShiftAmt); + } else + return; + + UsefulBits &= Mask; +} + +static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits, + unsigned Depth) { + uint64_t Imm = + cast(Op.getOperand(2).getNode())->getZExtValue(); + uint64_t MSB = + cast(Op.getOperand(3).getNode())->getZExtValue(); + + if (Op.getOperand(1) == Orig) + return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth); + + APInt OpUsefulBits(UsefulBits); + OpUsefulBits = 1; + + if (MSB >= Imm) { + OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1); + --OpUsefulBits; + UsefulBits &= ~OpUsefulBits; + getUsefulBits(Op, UsefulBits, Depth + 1); + } else { + OpUsefulBits = OpUsefulBits.shl(MSB + 1); + --OpUsefulBits; + UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm)); + getUsefulBits(Op, UsefulBits, Depth + 1); + } +} + +static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits, + SDValue Orig, unsigned Depth) { + + // Users of this node should have already been instruction selected + // FIXME: Can we turn that into an assert? + if (!UserNode->isMachineOpcode()) + return; + + switch (UserNode->getMachineOpcode()) { + default: + return; + case AArch64::ANDSWri: + case AArch64::ANDSXri: + case AArch64::ANDWri: + case AArch64::ANDXri: + // We increment Depth only when we call the getUsefulBits + return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits, + Depth); + case AArch64::UBFMWri: + case AArch64::UBFMXri: + return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth); + + case AArch64::ORRWrs: + case AArch64::ORRXrs: + if (UserNode->getOperand(1) != Orig) + return; + return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits, + Depth); + case AArch64::BFMWri: + case AArch64::BFMXri: + return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth); + } +} + +static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) { + if (Depth >= 6) + return; + // Initialize UsefulBits + if (!Depth) { + unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits(); + // At the beginning, assume every produced bits is useful + UsefulBits = APInt(Bitwidth, 0); + UsefulBits.flipAllBits(); + } + APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0); + + for (SDNode *Node : Op.getNode()->uses()) { + // A use cannot produce useful bits + APInt UsefulBitsForUse = APInt(UsefulBits); + getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth); + UsersUsefulBits |= UsefulBitsForUse; + } + // UsefulBits contains the produced bits that are meaningful for the + // current definition, thus a user cannot make a bit meaningful at + // this point + UsefulBits &= UsersUsefulBits; +} + +/// Create a machine node performing a notional SHL of Op by ShlAmount. If +/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is +/// 0, return Op unchanged. +static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) { + if (ShlAmount == 0) + return Op; + + EVT VT = Op.getValueType(); + SDLoc dl(Op); + unsigned BitWidth = VT.getSizeInBits(); + unsigned UBFMOpc = BitWidth == 32 ? 
AArch64::UBFMWri : AArch64::UBFMXri; + + SDNode *ShiftNode; + if (ShlAmount > 0) { + // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt + ShiftNode = CurDAG->getMachineNode( + UBFMOpc, dl, VT, Op, + CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT), + CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT)); + } else { + // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1 + assert(ShlAmount < 0 && "expected right shift"); + int ShrAmount = -ShlAmount; + ShiftNode = CurDAG->getMachineNode( + UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT), + CurDAG->getTargetConstant(BitWidth - 1, dl, VT)); + } + + return SDValue(ShiftNode, 0); +} + +/// Does this tree qualify as an attempt to move a bitfield into position, +/// essentially "(and (shl VAL, N), Mask)". +static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op, + SDValue &Src, int &ShiftAmount, + int &MaskWidth) { + EVT VT = Op.getValueType(); + unsigned BitWidth = VT.getSizeInBits(); + (void)BitWidth; + assert(BitWidth == 32 || BitWidth == 64); + + APInt KnownZero, KnownOne; + CurDAG->computeKnownBits(Op, KnownZero, KnownOne); + + // Non-zero in the sense that they're not provably zero, which is the key + // point if we want to use this value + uint64_t NonZeroBits = (~KnownZero).getZExtValue(); + + // Discard a constant AND mask if present. It's safe because the node will + // already have been factored into the computeKnownBits calculation above. + uint64_t AndImm; + if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) { + assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0); + Op = Op.getOperand(0); + } + + uint64_t ShlImm; + if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm)) + return false; + Op = Op.getOperand(0); + + if (!isShiftedMask_64(NonZeroBits)) + return false; + + ShiftAmount = countTrailingZeros(NonZeroBits); + MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount); + + // BFI encompasses sufficiently many nodes that it's worth inserting an extra + // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL + // amount. + Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount); + + return true; +} + +// Given a OR operation, check if we have the following pattern +// ubfm c, b, imm, imm2 (or something that does the same jobs, see +// isBitfieldExtractOp) +// d = e & mask2 ; where mask is a binary sequence of 1..10..0 and +// countTrailingZeros(mask2) == imm2 - imm + 1 +// f = d | c +// if yes, given reference arguments will be update so that one can replace +// the OR instruction with: +// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2 +static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst, + SDValue &Src, unsigned &ImmR, + unsigned &ImmS, SelectionDAG *CurDAG) { + assert(N->getOpcode() == ISD::OR && "Expect a OR operation"); + + // Set Opc + EVT VT = N->getValueType(0); + if (VT == MVT::i32) + Opc = AArch64::BFMWri; + else if (VT == MVT::i64) + Opc = AArch64::BFMXri; + else + return false; + + // Because of simplify-demanded-bits in DAGCombine, involved masks may not + // have the expected shape. Try to undo that. + APInt UsefulBits; + getUsefulBits(SDValue(N, 0), UsefulBits); + + unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros(); + unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros(); + + // OR is commutative, check both possibilities (does llvm provide a + // way to do that directely, e.g., via code matcher?) 
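A small self-contained illustration (the values are arbitrary) of the OR shape this routine matches and of the "complementary mask" condition isBitfieldDstMask tests: when the AND on the insertee clears exactly the field being inserted, the whole OR collapses into a single BFM-style insert.

#include <cassert>
#include <cstdint>

// BFM-style insert: copy Width low bits of Src into Dst at bit Lsb, leaving
// every other bit of Dst untouched.
static uint64_t bfi(uint64_t Dst, uint64_t Src, unsigned Lsb, unsigned Width) {
  uint64_t FieldMask = ((1ULL << Width) - 1) << Lsb;   // Width < 64 here
  return (Dst & ~FieldMask) | ((Src << Lsb) & FieldMask);
}

int main() {
  // The OR being matched: d = (e & ~0x0ff0) | ((x & 0xff) << 4).
  uint64_t E = 0xAAAAAAAAAAAAAAAAULL, X = 0x5C;
  uint64_t ByOr = (E & ~0x0ff0ULL) | ((X & 0xff) << 4);
  // The destination mask ~0x0ff0 zeroes exactly the bits the shifted field can
  // set, i.e. the masks form a complementary pair, so one insert suffices.
  assert(bfi(E, X, /*Lsb=*/4, /*Width=*/8) == ByOr);
  return 0;
}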
+ SDValue OrOpd1Val = N->getOperand(1); + SDNode *OrOpd0 = N->getOperand(0).getNode(); + SDNode *OrOpd1 = N->getOperand(1).getNode(); + for (int i = 0; i < 2; + ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) { + unsigned BFXOpc; + int DstLSB, Width; + if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS, + NumberOfIgnoredLowBits, true)) { + // Check that the returned opcode is compatible with the pattern, + // i.e., same type and zero extended (U and not S) + if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) || + (BFXOpc != AArch64::UBFMWri && VT == MVT::i32)) + continue; + + // Compute the width of the bitfield insertion + DstLSB = 0; + Width = ImmS - ImmR + 1; + // FIXME: This constraint is to catch bitfield insertion we may + // want to widen the pattern if we want to grab general bitfied + // move case + if (Width <= 0) + continue; + + // If the mask on the insertee is correct, we have a BFXIL operation. We + // can share the ImmR and ImmS values from the already-computed UBFM. + } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src, + DstLSB, Width)) { + ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits(); + ImmS = Width - 1; + } else + continue; + + // Check the second part of the pattern + EVT VT = OrOpd1->getValueType(0); + assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand"); + + // Compute the Known Zero for the candidate of the first operand. + // This allows to catch more general case than just looking for + // AND with imm. Indeed, simplify-demanded-bits may have removed + // the AND instruction because it proves it was useless. + APInt KnownZero, KnownOne; + CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne); + + // Check if there is enough room for the second operand to appear + // in the first one + APInt BitsToBeInserted = + APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width); + + if ((BitsToBeInserted & ~KnownZero) != 0) + continue; + + // Set the first operand + uint64_t Imm; + if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) && + isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT)) + // In that case, we can eliminate the AND + Dst = OrOpd1->getOperand(0); + else + // Maybe the AND has been removed by simplify-demanded-bits + // or is useful because it discards more bits + Dst = OrOpd1Val; + + // both parts match + return true; + } + + return false; +} + +SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) { + if (N->getOpcode() != ISD::OR) + return nullptr; + + unsigned Opc; + unsigned LSB, MSB; + SDValue Opd0, Opd1; + + if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG)) + return nullptr; + + EVT VT = N->getValueType(0); + SDLoc dl(N); + SDValue Ops[] = { Opd0, + Opd1, + CurDAG->getTargetConstant(LSB, dl, VT), + CurDAG->getTargetConstant(MSB, dl, VT) }; + return CurDAG->SelectNodeTo(N, Opc, VT, Ops); +} + +/// GenerateInexactFlagIfNeeded - Insert FRINTX instruction to generate inexact +/// signal on round-to-integer operations if needed. C11 leaves it +/// implementation-defined whether these operations trigger an inexact +/// exception. IEEE says they don't. Unfortunately, Darwin decided they do so +/// we sometimes have to insert a special instruction just to set the right bit +/// in FPSR. +SDNode *AArch64DAGToDAGISel::GenerateInexactFlagIfNeeded(const SDValue &In, + unsigned InTyVariant, + SDLoc DL) { + if (Subtarget->isTargetDarwin() && !TM.Options.UnsafeFPMath) { + // Pick the right FRINTX using InTyVariant needed to set the flags. 
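For reference, the same distinction expressed with the C library (a sketch only; whether the flag is actually observable depends on the platform's floating-point environment support and compiler settings): nearbyint never raises the inexact exception, while rint may, which is what FRINTX provides at the instruction level.

#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  volatile double X = 1.5;   // volatile keeps the calls from being folded away

  std::feclearexcept(FE_INEXACT);
  (void)std::nearbyint(X);   // rounds, but never raises FE_INEXACT
  std::printf("nearbyint set inexact: %d\n", std::fetestexcept(FE_INEXACT) != 0);

  std::feclearexcept(FE_INEXACT);
  (void)std::rint(X);        // rounds and may raise FE_INEXACT (FRINTX-like)
  std::printf("rint      set inexact: %d\n", std::fetestexcept(FE_INEXACT) != 0);
  return 0;
}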
+ // InTyVariant is 0 for 32-bit and 1 for 64-bit. + unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr }; + return CurDAG->getMachineNode(FRINTXOpcs[InTyVariant], DL, + In.getValueType(), MVT::Glue, In); + } + return nullptr; +} + +SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) { + EVT VT = N->getValueType(0); + unsigned Variant; + unsigned Opc; + + if (VT == MVT::f32) { + Variant = 0; + } else if (VT == MVT::f64) { + Variant = 1; + } else + return nullptr; // Unrecognized argument type. Fall back on default codegen. + + switch (N->getOpcode()) { + default: + return nullptr; // Unrecognized libm ISD node. Fall back on default codegen. + case ISD::FCEIL: { + unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr }; + Opc = FRINTPOpcs[Variant]; + break; + } + case ISD::FFLOOR: { + unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr }; + Opc = FRINTMOpcs[Variant]; + break; + } + case ISD::FTRUNC: { + unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr }; + Opc = FRINTZOpcs[Variant]; + break; + } + case ISD::FROUND: { + unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr }; + Opc = FRINTAOpcs[Variant]; + break; + } + } + + SDLoc dl(N); + SDValue In = N->getOperand(0); + SmallVector Ops; + Ops.push_back(In); + + if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, Variant, dl)) + Ops.push_back(SDValue(FRINTXNode, 1)); + + return CurDAG->getMachineNode(Opc, dl, VT, Ops); +} + +/// SelectFPConvertWithRound - Try to combine FP rounding and +/// FP-INT conversion. +SDNode *AArch64DAGToDAGISel::SelectFPConvertWithRound(SDNode *N) { + SDNode *Op0 = N->getOperand(0).getNode(); + + // Return if the round op is used by other nodes, as this would result in two + // FRINTX, one each for round and convert. + if (!Op0->hasOneUse()) + return nullptr; + + unsigned InTyVariant; + EVT InTy = Op0->getValueType(0); + if (InTy == MVT::f32) + InTyVariant = 0; + else if (InTy == MVT::f64) + InTyVariant = 1; + else + return nullptr; + + unsigned OutTyVariant; + EVT OutTy = N->getValueType(0); + if (OutTy == MVT::i32) + OutTyVariant = 0; + else if (OutTy == MVT::i64) + OutTyVariant = 1; + else + return nullptr; + + assert((N->getOpcode() == ISD::FP_TO_SINT + || N->getOpcode() == ISD::FP_TO_UINT) && "Unexpected opcode!"); + unsigned FpConVariant = N->getOpcode() == ISD::FP_TO_SINT ? 
0 : 1; + + unsigned Opc; + switch (Op0->getOpcode()) { + default: + return nullptr; + case ISD::FCEIL: { + unsigned FCVTPOpcs[2][2][2] = { + { { AArch64::FCVTPSUWSr, AArch64::FCVTPSUXSr }, + { AArch64::FCVTPSUWDr, AArch64::FCVTPSUXDr } }, + { { AArch64::FCVTPUUWSr, AArch64::FCVTPUUXSr }, + { AArch64::FCVTPUUWDr, AArch64::FCVTPUUXDr } } }; + Opc = FCVTPOpcs[FpConVariant][InTyVariant][OutTyVariant]; + break; + } + case ISD::FFLOOR: { + unsigned FCVTMOpcs[2][2][2] = { + { { AArch64::FCVTMSUWSr, AArch64::FCVTMSUXSr }, + { AArch64::FCVTMSUWDr, AArch64::FCVTMSUXDr } }, + { { AArch64::FCVTMUUWSr, AArch64::FCVTMUUXSr }, + { AArch64::FCVTMUUWDr, AArch64::FCVTMUUXDr } } }; + Opc = FCVTMOpcs[FpConVariant][InTyVariant][OutTyVariant]; + break; + } + case ISD::FTRUNC: { + unsigned FCVTZOpcs[2][2][2] = { + { { AArch64::FCVTZSUWSr, AArch64::FCVTZSUXSr }, + { AArch64::FCVTZSUWDr, AArch64::FCVTZSUXDr } }, + { { AArch64::FCVTZUUWSr, AArch64::FCVTZUUXSr }, + { AArch64::FCVTZUUWDr, AArch64::FCVTZUUXDr } } }; + Opc = FCVTZOpcs[FpConVariant][InTyVariant][OutTyVariant]; + break; + } + case ISD::FROUND: { + unsigned FCVTAOpcs[2][2][2] = { + { { AArch64::FCVTASUWSr, AArch64::FCVTASUXSr }, + { AArch64::FCVTASUWDr, AArch64::FCVTASUXDr } }, + { { AArch64::FCVTAUUWSr, AArch64::FCVTAUUXSr }, + { AArch64::FCVTAUUWDr, AArch64::FCVTAUUXDr } } }; + Opc = FCVTAOpcs[FpConVariant][InTyVariant][OutTyVariant]; + break; + } + } + + SDLoc DL(N); + SDValue In = Op0->getOperand(0); + SmallVector Ops; + Ops.push_back(In); + + if (SDNode *FRINTXNode = GenerateInexactFlagIfNeeded(In, InTyVariant, DL)) + Ops.push_back(SDValue(FRINTXNode, 1)); + + return CurDAG->getMachineNode(Opc, DL, OutTy, Ops); +} + +bool +AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, + unsigned RegWidth) { + APFloat FVal(0.0); + if (ConstantFPSDNode *CN = dyn_cast(N)) + FVal = CN->getValueAPF(); + else if (LoadSDNode *LN = dyn_cast(N)) { + // Some otherwise illegal constants are allowed in this case. + if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow || + !isa(LN->getOperand(1)->getOperand(1))) + return false; + + ConstantPoolSDNode *CN = + dyn_cast(LN->getOperand(1)->getOperand(1)); + FVal = cast(CN->getConstVal())->getValueAPF(); + } else + return false; + + // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits + // is between 1 and 32 for a destination w-register, or 1 and 64 for an + // x-register. + // + // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we + // want THIS_NODE to be 2^fbits. This is much easier to deal with using + // integers. + bool IsExact; + + // fbits is between 1 and 64 in the worst-case, which means the fmul + // could have 2^64 as an actual operand. Need 65 bits of precision. + APSInt IntVal(65, true); + FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact); + + // N.b. isPowerOf2 also checks for > 0. + if (!IsExact || !IntVal.isPowerOf2()) return false; + unsigned FBits = IntVal.logBase2(); + + // Checks above should have guaranteed that we haven't lost information in + // finding FBits, but it must still be in range. + if (FBits == 0 || FBits > RegWidth) return false; + + FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32); + return true; +} + +// Inspects a register string of the form o0:op1:CRn:CRm:op2 gets the fields +// of the string and obtains the integer values from them and combines these +// into a single value to be used in the MRS/MSR instruction. 
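A simplified stand-alone version of the packing this helper performs (the encoding string below is hypothetical, chosen only so the resulting immediate can be checked by hand).

#include <cassert>
#include <sstream>
#include <string>
#include <vector>

// Pack "op0:op1:CRn:CRm:op2" into the single MRS/MSR immediate, with the same
// shifts as getIntOperandFromRegisterString; anything else is treated as a
// named register and reported as -1.
static int packSysReg(const std::string &S) {
  std::vector<std::string> Fields;
  std::stringstream SS(S);
  std::string Tok;
  while (std::getline(SS, Tok, ':'))
    Fields.push_back(Tok);
  if (Fields.size() != 5)
    return -1;
  unsigned V[5];
  for (int i = 0; i < 5; ++i)
    V[i] = static_cast<unsigned>(std::stoul(Fields[i]));
  return (V[0] << 14) | (V[1] << 11) | (V[2] << 7) | (V[3] << 3) | V[4];
}

int main() {
  assert(packSysReg("3:3:13:0:2") == ((3 << 14) | (3 << 11) | (13 << 7) | 2));
  assert(packSysReg("spsel") == -1);   // named registers go to the mappers
  return 0;
}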
+static int getIntOperandFromRegisterString(StringRef RegString) { + SmallVector Fields; + RegString.split(Fields, ":"); + + if (Fields.size() == 1) + return -1; + + assert(Fields.size() == 5 + && "Invalid number of fields in read register string"); + + SmallVector Ops; + bool AllIntFields = true; + + for (StringRef Field : Fields) { + unsigned IntField; + AllIntFields &= !Field.getAsInteger(10, IntField); + Ops.push_back(IntField); + } + + assert(AllIntFields && + "Unexpected non-integer value in special register string."); + + // Need to combine the integer fields of the string into a single value + // based on the bit encoding of MRS/MSR instruction. + return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) | + (Ops[3] << 3) | (Ops[4]); +} + +// Lower the read_register intrinsic to an MRS instruction node if the special +// register string argument is either of the form detailed in the ALCE (the +// form described in getIntOperandsFromRegsterString) or is a named register +// known by the MRS SysReg mapper. +SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) { + const MDNodeSDNode *MD = dyn_cast(N->getOperand(1)); + const MDString *RegString = dyn_cast(MD->getMD()->getOperand(0)); + SDLoc DL(N); + + int Reg = getIntOperandFromRegisterString(RegString->getString()); + if (Reg != -1) + return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0), + MVT::Other, + CurDAG->getTargetConstant(Reg, DL, MVT::i32), + N->getOperand(0)); + + // Use the sysreg mapper to map the remaining possible strings to the + // value for the register to be used for the instruction operand. + AArch64SysReg::MRSMapper mapper; + bool IsValidSpecialReg; + Reg = mapper.fromString(RegString->getString(), + Subtarget->getFeatureBits(), + IsValidSpecialReg); + if (IsValidSpecialReg) + return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0), + MVT::Other, + CurDAG->getTargetConstant(Reg, DL, MVT::i32), + N->getOperand(0)); + + return nullptr; +} + +// Lower the write_register intrinsic to an MSR instruction node if the special +// register string argument is either of the form detailed in the ALCE (the +// form described in getIntOperandsFromRegsterString) or is a named register +// known by the MSR SysReg mapper. +SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) { + const MDNodeSDNode *MD = dyn_cast(N->getOperand(1)); + const MDString *RegString = dyn_cast(MD->getMD()->getOperand(0)); + SDLoc DL(N); + + int Reg = getIntOperandFromRegisterString(RegString->getString()); + if (Reg != -1) + return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other, + CurDAG->getTargetConstant(Reg, DL, MVT::i32), + N->getOperand(2), N->getOperand(0)); + + // Check if the register was one of those allowed as the pstatefield value in + // the MSR (immediate) instruction. To accept the values allowed in the + // pstatefield for the MSR (immediate) instruction, we also require that an + // immediate value has been provided as an argument, we know that this is + // the case as it has been ensured by semantic checking. 
+ AArch64PState::PStateMapper PMapper; + bool IsValidSpecialReg; + Reg = PMapper.fromString(RegString->getString(), + Subtarget->getFeatureBits(), + IsValidSpecialReg); + if (IsValidSpecialReg) { + assert (isa(N->getOperand(2)) + && "Expected a constant integer expression."); + uint64_t Immed = cast(N->getOperand(2))->getZExtValue(); + return CurDAG->getMachineNode(AArch64::MSRpstate, DL, MVT::Other, + CurDAG->getTargetConstant(Reg, DL, MVT::i32), + CurDAG->getTargetConstant(Immed, DL, MVT::i16), + N->getOperand(0)); + } + + // Use the sysreg mapper to attempt to map the remaining possible strings + // to the value for the register to be used for the MSR (register) + // instruction operand. + AArch64SysReg::MSRMapper Mapper; + Reg = Mapper.fromString(RegString->getString(), + Subtarget->getFeatureBits(), + IsValidSpecialReg); + + if (IsValidSpecialReg) + return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other, + CurDAG->getTargetConstant(Reg, DL, MVT::i32), + N->getOperand(2), N->getOperand(0)); + + return nullptr; +} + +SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) { + // Dump information about the Node being selected + DEBUG(errs() << "Selecting: "); + DEBUG(Node->dump(CurDAG)); + DEBUG(errs() << "\n"); + + // If we have a custom node, we already have selected! + if (Node->isMachineOpcode()) { + DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); + Node->setNodeId(-1); + return nullptr; + } + + // Few custom selection stuff. + SDNode *ResNode = nullptr; + EVT VT = Node->getValueType(0); + + switch (Node->getOpcode()) { + default: + break; + + case ISD::READ_REGISTER: + if (SDNode *Res = SelectReadRegister(Node)) + return Res; + break; + + case ISD::WRITE_REGISTER: + if (SDNode *Res = SelectWriteRegister(Node)) + return Res; + break; + + case ISD::ADD: + if (SDNode *I = SelectMLAV64LaneV128(Node)) + return I; + break; + + case ISD::LOAD: { + // Try to select as an indexed load. Fall through to normal processing + // if we can't. + bool Done = false; + SDNode *I = SelectIndexedLoad(Node, Done); + if (Done) + return I; + break; + } + + case ISD::SRL: + case ISD::AND: + case ISD::SRA: + if (SDNode *I = SelectBitfieldExtractOp(Node)) + return I; + break; + + case ISD::OR: + if (SDNode *I = SelectBitfieldInsertOp(Node)) + return I; + break; + + case ISD::EXTRACT_VECTOR_ELT: { + // Extracting lane zero is a special case where we can just use a plain + // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for + // the rest of the compiler, especially the register allocator and copyi + // propagation, to reason about, so is preferred when it's possible to + // use it. + ConstantSDNode *LaneNode = cast(Node->getOperand(1)); + // Bail and use the default Select() for non-zero lanes. + if (LaneNode->getZExtValue() != 0) + break; + // If the element type is not the same as the result type, likewise + // bail and use the default Select(), as there's more to do than just + // a cross-class COPY. This catches extracts of i8 and i16 elements + // since they will need an explicit zext. 
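A stand-alone sketch of why the lane-zero case is just a subregister COPY, assuming the little-endian layout AArch64 normally runs with: lane 0 occupies the low bits of the vector register, which are exactly the bits named by the ssub/dsub subregisters.

#include <cassert>
#include <cstring>

int main() {
  unsigned char VReg[16] = {};               // stand-in for a Q register
  float Lanes[4] = {3.5f, 1.0f, 2.0f, 4.0f};
  std::memcpy(VReg, Lanes, sizeof(Lanes));   // v0.4s holds Lanes

  float S0;                                  // the "FMOV s0, v0.s[0]" view
  std::memcpy(&S0, VReg, sizeof(S0));        // lane 0 == low 32 bits == ssub
  assert(S0 == 3.5f);
  return 0;
}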
+    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
+      break;
+    unsigned SubReg;
+    switch (Node->getOperand(0)
+                .getValueType()
+                .getVectorElementType()
+                .getSizeInBits()) {
+    default:
+      llvm_unreachable("Unexpected vector element type!");
+    case 64:
+      SubReg = AArch64::dsub;
+      break;
+    case 32:
+      SubReg = AArch64::ssub;
+      break;
+    case 16:
+      SubReg = AArch64::hsub;
+      break;
+    case 8:
+      llvm_unreachable("unexpected zext-requiring extract element!");
+    }
+    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
+                                                     Node->getOperand(0));
+    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
+    DEBUG(Extract->dumpr(CurDAG));
+    DEBUG(dbgs() << "\n");
+    return Extract.getNode();
+  }
+  case ISD::Constant: {
+    // Materialize zero constants as copies from WZR/XZR. This allows
+    // the coalescer to propagate these into other instructions.
+    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
+    if (ConstNode->isNullValue()) {
+      if (VT == MVT::i32)
+        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
+                                      AArch64::WZR, MVT::i32).getNode();
+      else if (VT == MVT::i64)
+        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
+                                      AArch64::XZR, MVT::i64).getNode();
+    }
+    break;
+  }
+
+  case ISD::FrameIndex: {
+    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
+    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
+    unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
+    const TargetLowering *TLI = getTargetLowering();
+    SDValue TFI = CurDAG->getTargetFrameIndex(
+        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
+    SDLoc DL(Node);
+    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
+                      CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
+    return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
+  }
+  case ISD::INTRINSIC_W_CHAIN: {
+    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+    switch (IntNo) {
+    default:
+      break;
+    case Intrinsic::aarch64_ldaxp:
+    case Intrinsic::aarch64_ldxp: {
+      unsigned Op =
+          IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
+      SDValue MemAddr = Node->getOperand(2);
+      SDLoc DL(Node);
+      SDValue Chain = Node->getOperand(0);
+
+      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
+                                          MVT::Other, MemAddr, Chain);
+
+      // Transfer memoperands.
+      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
+      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
+      return Ld;
+    }
+    case Intrinsic::aarch64_stlxp:
+    case Intrinsic::aarch64_stxp: {
+      unsigned Op =
+          IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
+      SDLoc DL(Node);
+      SDValue Chain = Node->getOperand(0);
+      SDValue ValLo = Node->getOperand(2);
+      SDValue ValHi = Node->getOperand(3);
+      SDValue MemAddr = Node->getOperand(4);
+
+      // Place arguments in the right order.
+      SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
+
+      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
+      // Transfer memoperands.
+      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
+      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
+
+      return St;
+    }
+    case Intrinsic::aarch64_neon_ld1x2:
+      if (VT == MVT::v8i8)
+        return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
+      else if (VT == MVT::v16i8)
+        return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
+      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
+        return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
+      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+        return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
+      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+        return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
+      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+        return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
+      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+        return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
+      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+        return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
+      break;
+    case Intrinsic::aarch64_neon_ld1x3:
+      if (VT == MVT::v8i8)
+        return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
+      else if (VT == MVT::v16i8)
+        return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
+      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
+        return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
+      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+        return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
+      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+        return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
+      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+        return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
+      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+        return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
+      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+        return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
+      break;
+    case Intrinsic::aarch64_neon_ld1x4:
+      if (VT == MVT::v8i8)
+        return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
+      else if (VT == MVT::v16i8)
+        return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
+      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
+        return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
+      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+        return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
+      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+        return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
+      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+        return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
+      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+        return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
+      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+        return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
+      break;
+    case Intrinsic::aarch64_neon_ld2:
+      if (VT == MVT::v8i8)
+        return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
+      else if (VT == MVT::v16i8)
+        return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
+      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
+        return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
+      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+        return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
+      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+        return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
+      else if (VT == MVT::v4i32
|| VT == MVT::v4f32) + return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0); + break; + case Intrinsic::aarch64_neon_ld3: + if (VT == MVT::v8i8) + return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0); + break; + case Intrinsic::aarch64_neon_ld4: + if (VT == MVT::v8i8) + return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0); + break; + case Intrinsic::aarch64_neon_ld2r: + if (VT == MVT::v8i8) + return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0); + break; + case Intrinsic::aarch64_neon_ld3r: + if (VT == MVT::v8i8) + return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0); + else if (VT 
== MVT::v2i32 || VT == MVT::v2f32)
+        return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
+      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+        return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
+      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+        return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
+      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+        return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
+      break;
+    case Intrinsic::aarch64_neon_ld4r:
+      if (VT == MVT::v8i8)
+        return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
+      else if (VT == MVT::v16i8)
+        return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
+      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
+        return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
+      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+        return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
+      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+        return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
+      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+        return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
+      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+        return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
+      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+        return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
+      break;
+    case Intrinsic::aarch64_neon_ld2lane:
+      if (VT == MVT::v16i8 || VT == MVT::v8i8)
+        return SelectLoadLane(Node, 2, AArch64::LD2i8);
+      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+               VT == MVT::v8f16)
+        return SelectLoadLane(Node, 2, AArch64::LD2i16);
+      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+               VT == MVT::v2f32)
+        return SelectLoadLane(Node, 2, AArch64::LD2i32);
+      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+               VT == MVT::v1f64)
+        return SelectLoadLane(Node, 2, AArch64::LD2i64);
+      break;
+    case Intrinsic::aarch64_neon_ld3lane:
+      if (VT == MVT::v16i8 || VT == MVT::v8i8)
+        return SelectLoadLane(Node, 3, AArch64::LD3i8);
+      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+               VT == MVT::v8f16)
+        return SelectLoadLane(Node, 3, AArch64::LD3i16);
+      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+               VT == MVT::v2f32)
+        return SelectLoadLane(Node, 3, AArch64::LD3i32);
+      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+               VT == MVT::v1f64)
+        return SelectLoadLane(Node, 3, AArch64::LD3i64);
+      break;
+    case Intrinsic::aarch64_neon_ld4lane:
+      if (VT == MVT::v16i8 || VT == MVT::v8i8)
+        return SelectLoadLane(Node, 4, AArch64::LD4i8);
+      else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
+               VT == MVT::v8f16)
+        return SelectLoadLane(Node, 4, AArch64::LD4i16);
+      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+               VT == MVT::v2f32)
+        return SelectLoadLane(Node, 4, AArch64::LD4i32);
+      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+               VT == MVT::v1f64)
+        return SelectLoadLane(Node, 4, AArch64::LD4i64);
+      break;
+    }
+  } break;
+  case ISD::INTRINSIC_WO_CHAIN: {
+    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
+    switch (IntNo) {
+    default:
+      break;
+    case Intrinsic::aarch64_neon_tbl2:
+      return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
+                                                  : AArch64::TBLv16i8Two,
+                         false);
+    case Intrinsic::aarch64_neon_tbl3:
+      return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
+                                                  : AArch64::TBLv16i8Three,
+                         false);
+    case Intrinsic::aarch64_neon_tbl4:
+      return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
+                                                  : AArch64::TBLv16i8Four,
+                         false);
+    case Intrinsic::aarch64_neon_tbx2:
+      return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
+                                                  : AArch64::TBXv16i8Two,
+                         true);
+    case Intrinsic::aarch64_neon_tbx3:
+      return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
+                                                  : AArch64::TBXv16i8Three,
+                         true);
+    case Intrinsic::aarch64_neon_tbx4:
+      return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
+                                                  : AArch64::TBXv16i8Four,
+                         true);
+    case Intrinsic::aarch64_neon_smull:
+    case Intrinsic::aarch64_neon_umull:
+      if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
+        return N;
+      break;
+    }
+    break;
+  }
+  case ISD::INTRINSIC_VOID: {
+    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+    if (Node->getNumOperands() >= 3)
+      VT = Node->getOperand(2)->getValueType(0);
+    switch (IntNo) {
+    default:
+      break;
+    case Intrinsic::aarch64_neon_st1x2: {
+      if (VT == MVT::v8i8)
+        return SelectStore(Node, 2, AArch64::ST1Twov8b);
+      else if (VT == MVT::v16i8)
+        return SelectStore(Node, 2, AArch64::ST1Twov16b);
+      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
+        return SelectStore(Node, 2, AArch64::ST1Twov4h);
+      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+        return SelectStore(Node, 2, AArch64::ST1Twov8h);
+      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+        return SelectStore(Node, 2, AArch64::ST1Twov2s);
+      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+        return SelectStore(Node, 2, AArch64::ST1Twov4s);
+      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+        return SelectStore(Node, 2, AArch64::ST1Twov2d);
+      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+        return SelectStore(Node, 2, AArch64::ST1Twov1d);
+      break;
+    }
+    case Intrinsic::aarch64_neon_st1x3: {
+      if (VT == MVT::v8i8)
+        return SelectStore(Node, 3, AArch64::ST1Threev8b);
+      else if (VT == MVT::v16i8)
+        return SelectStore(Node, 3, AArch64::ST1Threev16b);
+      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
+        return SelectStore(Node, 3, AArch64::ST1Threev4h);
+      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+        return SelectStore(Node, 3, AArch64::ST1Threev8h);
+      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+        return SelectStore(Node, 3, AArch64::ST1Threev2s);
+      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+        return SelectStore(Node, 3, AArch64::ST1Threev4s);
+      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+        return SelectStore(Node, 3, AArch64::ST1Threev2d);
+      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+        return SelectStore(Node, 3, AArch64::ST1Threev1d);
+      break;
+    }
+    case Intrinsic::aarch64_neon_st1x4: {
+      if (VT == MVT::v8i8)
+        return SelectStore(Node, 4, AArch64::ST1Fourv8b);
+      else if (VT == MVT::v16i8)
+        return SelectStore(Node, 4, AArch64::ST1Fourv16b);
+      else if (VT == MVT::v4i16 || VT == MVT::v4f16)
+        return SelectStore(Node, 4, AArch64::ST1Fourv4h);
+      else if (VT == MVT::v8i16 || VT == MVT::v8f16)
+        return SelectStore(Node, 4, AArch64::ST1Fourv8h);
+      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+        return SelectStore(Node, 4, AArch64::ST1Fourv2s);
+      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+        return SelectStore(Node, 4, AArch64::ST1Fourv4s);
+      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+        return SelectStore(Node, 4, AArch64::ST1Fourv2d);
+      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+        return SelectStore(Node, 4, AArch64::ST1Fourv1d);
+      break;
+    }
+    case Intrinsic::aarch64_neon_st2: {
+      if (VT == MVT::v8i8)
+        return SelectStore(Node, 2,
AArch64::ST2Twov8b); + else if (VT == MVT::v16i8) + return SelectStore(Node, 2, AArch64::ST2Twov16b); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectStore(Node, 2, AArch64::ST2Twov4h); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectStore(Node, 2, AArch64::ST2Twov8h); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectStore(Node, 2, AArch64::ST2Twov2s); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectStore(Node, 2, AArch64::ST2Twov4s); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectStore(Node, 2, AArch64::ST2Twov2d); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectStore(Node, 2, AArch64::ST1Twov1d); + break; + } + case Intrinsic::aarch64_neon_st3: { + if (VT == MVT::v8i8) + return SelectStore(Node, 3, AArch64::ST3Threev8b); + else if (VT == MVT::v16i8) + return SelectStore(Node, 3, AArch64::ST3Threev16b); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectStore(Node, 3, AArch64::ST3Threev4h); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectStore(Node, 3, AArch64::ST3Threev8h); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectStore(Node, 3, AArch64::ST3Threev2s); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectStore(Node, 3, AArch64::ST3Threev4s); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectStore(Node, 3, AArch64::ST3Threev2d); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectStore(Node, 3, AArch64::ST1Threev1d); + break; + } + case Intrinsic::aarch64_neon_st4: { + if (VT == MVT::v8i8) + return SelectStore(Node, 4, AArch64::ST4Fourv8b); + else if (VT == MVT::v16i8) + return SelectStore(Node, 4, AArch64::ST4Fourv16b); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectStore(Node, 4, AArch64::ST4Fourv4h); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectStore(Node, 4, AArch64::ST4Fourv8h); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectStore(Node, 4, AArch64::ST4Fourv2s); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectStore(Node, 4, AArch64::ST4Fourv4s); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectStore(Node, 4, AArch64::ST4Fourv2d); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectStore(Node, 4, AArch64::ST1Fourv1d); + break; + } + case Intrinsic::aarch64_neon_st2lane: { + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectStoreLane(Node, 2, AArch64::ST2i8); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectStoreLane(Node, 2, AArch64::ST2i16); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectStoreLane(Node, 2, AArch64::ST2i32); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectStoreLane(Node, 2, AArch64::ST2i64); + break; + } + case Intrinsic::aarch64_neon_st3lane: { + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectStoreLane(Node, 3, AArch64::ST3i8); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectStoreLane(Node, 3, AArch64::ST3i16); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectStoreLane(Node, 3, AArch64::ST3i32); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectStoreLane(Node, 3, AArch64::ST3i64); + break; + } + case Intrinsic::aarch64_neon_st4lane: { + if 
(VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectStoreLane(Node, 4, AArch64::ST4i8); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectStoreLane(Node, 4, AArch64::ST4i16); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectStoreLane(Node, 4, AArch64::ST4i32); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectStoreLane(Node, 4, AArch64::ST4i64); + break; + } + } + break; + } + case AArch64ISD::LD2post: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD3post: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD4post: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0); + break; + } + case 
AArch64ISD::LD1x2post: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD1x3post: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD1x4post: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD1DUPpost: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 1, 
AArch64::LD1Rv2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD2DUPpost: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD3DUPpost: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD4DUPpost: { + if (VT == MVT::v8i8) + return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0); + else if (VT == MVT::v16i8) + return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0); + break; + } + case AArch64ISD::LD1LANEpost: { + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 
|| + VT == MVT::v8f16) + return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST); + break; + } + case AArch64ISD::LD2LANEpost: { + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST); + break; + } + case AArch64ISD::LD3LANEpost: { + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST); + break; + } + case AArch64ISD::LD4LANEpost: { + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST); + break; + } + case AArch64ISD::ST2post: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v8i8) + return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST); + else if (VT == MVT::v16i8) + return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST); + break; + } + case AArch64ISD::ST3post: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v8i8) + return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST); + else if (VT == MVT::v16i8) + return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return 
SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST); + break; + } + case AArch64ISD::ST4post: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v8i8) + return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST); + else if (VT == MVT::v16i8) + return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST); + break; + } + case AArch64ISD::ST1x2post: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v8i8) + return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST); + else if (VT == MVT::v16i8) + return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST); + break; + } + case AArch64ISD::ST1x3post: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v8i8) + return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST); + else if (VT == MVT::v16i8) + return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST); + else if (VT == MVT::v4i16 || VT == MVT::v4f16) + return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST); + break; + } + case AArch64ISD::ST1x4post: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v8i8) + return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST); + else if (VT == MVT::v16i8) + return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST); + else if (VT == MVT::v4i16 
|| VT == MVT::v4f16) + return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST); + else if (VT == MVT::v8i16 || VT == MVT::v8f16) + return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST); + else if (VT == MVT::v2i32 || VT == MVT::v2f32) + return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST); + else if (VT == MVT::v4i32 || VT == MVT::v4f32) + return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST); + else if (VT == MVT::v1i64 || VT == MVT::v1f64) + return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST); + else if (VT == MVT::v2i64 || VT == MVT::v2f64) + return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST); + break; + } + case AArch64ISD::ST2LANEpost: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST); + break; + } + case AArch64ISD::ST3LANEpost: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST); + break; + } + case AArch64ISD::ST4LANEpost: { + VT = Node->getOperand(1).getValueType(); + if (VT == MVT::v16i8 || VT == MVT::v8i8) + return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST); + else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || + VT == MVT::v8f16) + return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST); + else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || + VT == MVT::v2f32) + return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST); + else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || + VT == MVT::v1f64) + return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST); + break; + } + + case ISD::FCEIL: + case ISD::FFLOOR: + case ISD::FTRUNC: + case ISD::FROUND: + if (SDNode *I = SelectLIBM(Node)) + return I; + break; + + case ISD::FP_TO_SINT: + case ISD::FP_TO_UINT: + if (SDNode *I = SelectFPConvertWithRound(Node)) + return I; + break; + } + + // Select the default instruction + ResNode = SelectCode(Node); + + DEBUG(errs() << "=> "); + if (ResNode == nullptr || ResNode == Node) + DEBUG(Node->dump(CurDAG)); + else + DEBUG(ResNode->dump(CurDAG)); + DEBUG(errs() << "\n"); return ResNode; } -/// This pass converts a legalized DAG into a AArch64-specific DAG, ready for -/// instruction scheduling. -FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM, +/// createAArch64ISelDag - This pass converts a legalized DAG into a +/// AArch64-specific DAG, ready for instruction scheduling. 
+FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
+                                         CodeGenOpt::Level OptLevel) {
+  return new AArch64DAGToDAGISel(TM, OptLevel);
+}
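+
+// (Usage note, not part of the original patch: the AArch64 pass pipeline is
+//  expected to create this pass from its addInstSelector() hook, e.g.
+//    addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));
+//  where getAArch64TargetMachine() and getOptLevel() are the usual
+//  TargetPassConfig-style accessors.)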