1 //===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the AArch64 target.
12 //===----------------------------------------------------------------------===//
15 #include "AArch64InstrInfo.h"
16 #include "AArch64Subtarget.h"
17 #include "AArch64TargetMachine.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/ADT/APSInt.h"
20 #include "llvm/CodeGen/SelectionDAGISel.h"
21 #include "llvm/IR/GlobalValue.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/raw_ostream.h"
27 #define DEBUG_TYPE "aarch64-isel"
29 //===--------------------------------------------------------------------===//
30 /// AArch64 specific code to select AArch64 machine instructions for
31 /// SelectionDAG operations.
/// Instruction selector for the AArch64 backend: lowers a legalized
/// SelectionDAG to target machine instructions. Most pattern matching is
/// autogenerated (AArch64GenDAGISel.inc, included below); the members here
/// supply the complex-pattern predicates and hand-written selection routines
/// that the generated matcher calls back into.
35 class AArch64DAGToDAGISel : public SelectionDAGISel {
36 AArch64TargetMachine &TM;
38 /// Keep a pointer to the AArch64Subtarget around so that we can
39 /// make the right decision when generating code for different targets.
40 const AArch64Subtarget *Subtarget;
43 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
44 CodeGenOpt::Level OptLevel)
45 : SelectionDAGISel(tm, OptLevel), TM(tm),
46 Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
49 virtual const char *getPassName() const {
50 return "AArch64 Instruction Selection";
53 // Include the pieces autogenerated from the target description.
54 #include "AArch64GenDAGISel.inc"
/// Match an unsigned, scaled 12-bit offset: the constant must be a multiple
/// of MemSize and, once divided by MemSize, must fit in 12 bits (<= 0xfff).
/// On success UImm12 receives the scaled value as an i64 target constant.
56 template<unsigned MemSize>
57 bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) {
58 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
59 if (!CN || CN->getZExtValue() % MemSize != 0
60 || CN->getZExtValue() / MemSize > 0xfff)
63 UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64);
/// Template shim: forwards to the non-template overload below with the
/// destination register width baked in by the pattern.
67 template<unsigned RegWidth>
68 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
69 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
72 /// Used for pre-lowered address-reference nodes, so we already know
73 /// the fields match. This operand's job is simply to add an
74 /// appropriate shift operand to the MOVZ/MOVK instruction.
75 template<unsigned LogShift>
76 bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
78 Shift = CurDAG->getTargetConstant(LogShift, MVT::i32);
82 bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);
84 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
87 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
89 std::vector<SDValue> &OutOps);
91 bool SelectLogicalImm(SDValue N, SDValue &Imm);
/// Template shim for TBZ/TBNZ-style bit tests; see the overload below.
93 template<unsigned RegWidth>
94 bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) {
95 return SelectTSTBOperand(N, FixedPos, RegWidth);
98 bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);
100 SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
103 /// Put the given constant into a pool and return a DAG which will give its
105 SDValue getConstantPoolItemAddress(SDLoc DL, const Constant *CV);
107 SDNode *TrySelectToMoveImm(SDNode *N);
108 SDNode *LowerToFPLitPool(SDNode *Node);
109 SDNode *SelectToLitPool(SDNode *N);
/// Main entry point invoked by SelectionDAGISel for every node.
111 SDNode* Select(SDNode*);
113 /// Get the opcode for table lookup instruction
114 unsigned getTBLOpc(bool IsExt, bool Is64Bit, unsigned NumOfVec);
116 /// Select NEON table lookup intrinsics. NumVecs should be 1, 2, 3 or 4.
117 /// IsExt is to indicate if the result will be extended with an argument.
118 SDNode *SelectVTBL(SDNode *N, unsigned NumVecs, bool IsExt);
120 /// Select NEON load intrinsics. NumVecs should be 1, 2, 3 or 4.
121 SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
122 const uint16_t *Opcode);
124 /// Select NEON store intrinsics. NumVecs should be 1, 2, 3 or 4.
125 SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
126 const uint16_t *Opcodes);
128 /// Form sequences of consecutive 64/128-bit registers for use in NEON
129 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
130 /// between 1 and 4 elements. If it contains a single element that is returned
131 /// unchanged; otherwise a REG_SEQUENCE value is returned.
132 SDValue createDTuple(ArrayRef<SDValue> Vecs);
133 SDValue createQTuple(ArrayRef<SDValue> Vecs);
135 /// Generic helper for the createDTuple/createQTuple
136 /// functions. Those should almost always be called instead.
137 SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
140 /// Select NEON load-duplicate intrinsics. NumVecs should be 2, 3 or 4.
141 /// The opcode array specifies the instructions used for load.
142 SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
143 const uint16_t *Opcodes);
145 /// Select NEON load/store lane intrinsics. NumVecs should be 2, 3 or 4.
146 /// The opcode arrays specify the instructions used for load/store.
147 SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
148 unsigned NumVecs, const uint16_t *Opcodes);
150 SDValue getTargetSubregToReg(int SRIdx, SDLoc DL, EVT VT, EVT VTD,
/// Complex-pattern helper for fixed-point FCVT[SU] selection: succeed only
/// when N is a constant FP that is exactly 2^fbits with 1 <= fbits <=
/// RegWidth, and emit the instruction's scale immediate into FixedPos.
156 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
158 const ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
159 if (!CN) return false;
161 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
162 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
165 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
166 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
170 // fbits is between 1 and 64 in the worst-case, which means the fmul
171 // could have 2^64 as an actual operand. Need 65 bits of precision.
172 APSInt IntVal(65, true);
173 CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
175 // N.b. isPowerOf2 also checks for > 0.
176 if (!IsExact || !IntVal.isPowerOf2()) return false;
177 unsigned FBits = IntVal.logBase2();
179 // Checks above should have guaranteed that we haven't lost information in
180 // finding FBits, but it must still be in range.
181 if (FBits == 0 || FBits > RegWidth) return false;
// The instruction's immediate field encodes the scale as (64 - fbits).
183 FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32);
/// Expand an inline-asm memory-constraint operand. For the constraints
/// handled here the operand is passed through unchanged; anything
/// unrecognised is a hard error.
188 AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
190 std::vector<SDValue> &OutOps) {
191 switch (ConstraintCode) {
192 default: llvm_unreachable("Unrecognised AArch64 memory constraint");
194 // FIXME: more freedom is actually permitted for 'm'. We can go
195 // hunting for a base and an offset if we want. Of course, since
196 // we don't really know how the operand is going to be used we're
197 // probably restricted to the load/store pair's simm7 as an offset
// Pass the address operand straight through for the asm printer to use.
200 OutOps.push_back(Op);
/// Match floating-point positive zero (+0.0). Negative zero is rejected by
/// isPosZero(), since it has a different bit pattern.
207 AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
208 ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
209 if (!Imm || !Imm->getValueAPF().isPosZero())
212 // Doesn't actually carry any information, but keeps TableGen quiet.
213 Dummy = CurDAG->getTargetConstant(0, MVT::i32);
/// Match a constant that is encodable as an A64 logical immediate (the
/// bitmask-immediate form used by AND/ORR/EOR). The register width is taken
/// from N's value type; on success Imm holds the encoded immediate bits.
217 bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
219 uint32_t RegWidth = N.getValueType().getSizeInBits();
221 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
222 if (!CN) return false;
// A64Imms does the actual bitmask-immediate encodability check/encoding.
224 if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits))
227 Imm = CurDAG->getTargetConstant(Bits, MVT::i32);
/// Try to materialize the constant in Node with a single instruction:
/// MOVZ, MOVN, or ORR-with-zero-register (logical immediate). Constants
/// needing more than one instruction are not handled here.
231 SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
234 EVT DestType = Node->getValueType(0);
235 unsigned DestWidth = DestType.getSizeInBits();
240 uint32_t LogicalBits;
242 uint64_t BitPat = cast<ConstantSDNode>(Node)->getZExtValue();
243 if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) {
245 MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii;
246 } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) {
248 MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii;
249 } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) {
250 // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we can
251 // use a 32-bit instruction: "movn w0, 0xedbc".
253 MOVOpcode = AArch64::MOVNwii;
254 } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) {
// Logical-immediate case: synthesize the constant as ORR zr, #imm.
255 MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi;
256 uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR;
258 return CurDAG->getMachineNode(MOVOpcode, dl, DestType,
259 CurDAG->getRegister(ZR, DestType),
260 CurDAG->getTargetConstant(LogicalBits, MVT::i32));
262 // Can't handle it in one instruction. There's scope for permitting two (or
263 // more) instructions, but that'll need more thought.
267 ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType,
268 CurDAG->getTargetConstant(UImm16, MVT::i32),
269 CurDAG->getTargetConstant(Shift, MVT::i32));
// A 32-bit MOV used to build a 64-bit value must be widened to the full
// X-register with SUBREG_TO_REG (the top 32 bits are known zero).
271 if (MOVType != DestType) {
272 ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
273 MVT::i64, MVT::i32, MVT::Other,
274 CurDAG->getTargetConstant(0, MVT::i64),
276 CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32));
/// Compute the address of CV's constant-pool slot, in a form appropriate to
/// the current code model: a WrapperSmall (hi/lo12 pair) for the small
/// model, or a full 64-bit MOVZ + 3x MOVK materialization for the large
/// model. Other code models are not supported.
283 AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL,
284 const Constant *CV) {
285 EVT PtrVT = getTargetLowering()->getPointerTy();
287 switch (getTargetLowering()->getTargetMachine().getCodeModel()) {
288 case CodeModel::Small: {
290 getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
// Small model: page address + low-12-bits pair resolved by WrapperSmall.
291 return CurDAG->getNode(
292 AArch64ISD::WrapperSmall, DL, PtrVT,
293 CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
294 CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_LO12),
295 CurDAG->getConstant(Alignment, MVT::i32));
297 case CodeModel::Large: {
// Large model: build the absolute 64-bit address 16 bits at a time, from
// the most-significant group (G3, shift 3) down to the least (G0_NC).
299 LitAddr = CurDAG->getMachineNode(
300 AArch64::MOVZxii, DL, PtrVT,
301 CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
302 CurDAG->getTargetConstant(3, MVT::i32));
303 LitAddr = CurDAG->getMachineNode(
304 AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
305 CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
306 CurDAG->getTargetConstant(2, MVT::i32));
307 LitAddr = CurDAG->getMachineNode(
308 AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
309 CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
310 CurDAG->getTargetConstant(1, MVT::i32));
311 LitAddr = CurDAG->getMachineNode(
312 AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
313 CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC),
314 CurDAG->getTargetConstant(0, MVT::i32));
315 return SDValue(LitAddr, 0);
318 llvm_unreachable("Only small and large code models supported now");
/// Lower an integer constant to a load from the constant pool, shrinking a
/// 64-bit value to a 32-bit pool entry plus zero/sign extension when its
/// value permits, to halve the pool footprint.
322 SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
324 uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
325 int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
326 EVT DestType = Node->getValueType(0);
328 // Since we may end up loading a 64-bit constant from a 32-bit entry the
329 // constant in the pool may have a different type to the eventual node.
330 ISD::LoadExtType Extension;
333 assert((DestType == MVT::i64 || DestType == MVT::i32)
334 && "Only expect integer constants at the moment");
336 if (DestType == MVT::i32) {
// 32-bit destination: the pool entry is the same width, no extension.
337 Extension = ISD::NON_EXTLOAD;
339 } else if (UnsignedVal <= UINT32_MAX) {
// Fits unsigned in 32 bits: store a 32-bit entry and zero-extend.
340 Extension = ISD::ZEXTLOAD;
342 } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) {
// Fits signed in 32 bits: store a 32-bit entry and sign-extend.
343 Extension = ISD::SEXTLOAD;
346 Extension = ISD::NON_EXTLOAD;
350 Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
351 MemType.getSizeInBits()),
353 SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
355 getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
357 return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
359 MachinePointerInfo::getConstantPool(), MemType,
360 /* isVolatile = */ false,
361 /* isNonTemporal = */ false,
362 Alignment).getNode();
/// Lower a floating-point constant to an (invariant, non-volatile) load from
/// its constant-pool slot, using ABI alignment for the FP type.
365 SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
367 const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
368 EVT DestType = Node->getValueType(0);
371 getTargetLowering()->getDataLayout()->getABITypeAlignment(FV->getType());
372 SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);
// Constant-pool contents never change, so the load is marked invariant.
374 return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
375 MachinePointerInfo::getConstantPool(),
376 /* isVolatile = */ false,
377 /* isNonTemporal = */ false,
378 /* isInvariant = */ true,
379 Alignment).getNode();
/// Match a single-bit test mask: the constant must be an exact power of two
/// whose bit index fits in the tested register width. On success FixedPos
/// receives the bit index (for TBZ/TBNZ-style bit-test selection).
383 AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
385 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
386 if (!CN) return false;
388 uint64_t Val = CN->getZExtValue();
390 if (!isPowerOf2_64(Val)) return false;
392 unsigned TestedBit = Log2_64(Val);
393 // Checks above should have guaranteed that we haven't lost information in
394 // finding TestedBit, but it must still be in range.
395 if (TestedBit >= RegWidth) return false;
397 FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64);
/// Select an atomic node to the width-appropriate opcode (Op8/Op16/Op32/...)
/// chosen by its memory VT, rebuilding the operand list so the memory
/// ordering is carried as an explicit immediate and the chain comes last.
401 SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
402 unsigned Op16,unsigned Op32,
404 // Mostly direct translation to the given operations, except that we preserve
405 // the AtomicOrdering for use later on.
406 AtomicSDNode *AN = cast<AtomicSDNode>(Node);
407 EVT VT = AN->getMemoryVT();
412 else if (VT == MVT::i16)
414 else if (VT == MVT::i32)
416 else if (VT == MVT::i64)
419 llvm_unreachable("Unexpected atomic operation");
// Copy all operands except the chain (operand 0), which is re-added last.
421 SmallVector<SDValue, 4> Ops;
422 for (unsigned i = 1; i < AN->getNumOperands(); ++i)
423 Ops.push_back(AN->getOperand(i));
// Ordering travels as an i32 immediate so later expansion can see it.
425 Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
426 Ops.push_back(AN->getOperand(0)); // Chain moves to the end
428 return CurDAG->SelectNodeTo(Node, Op,
429 AN->getValueType(0), MVT::Other,
430 &Ops[0], Ops.size());
/// Build a vector-list tuple of 64-bit D registers (DPair/DTriple/DQuad)
/// from 1-4 input vectors; delegates the REG_SEQUENCE work to createTuple.
433 SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
// Indexed by (Regs.size() - 2): pair, triple, quad register classes.
434 static unsigned RegClassIDs[] = { AArch64::DPairRegClassID,
435 AArch64::DTripleRegClassID,
436 AArch64::DQuadRegClassID };
437 static unsigned SubRegs[] = { AArch64::dsub_0, AArch64::dsub_1,
438 AArch64::dsub_2, AArch64::dsub_3 };
440 return createTuple(Regs, RegClassIDs, SubRegs);
/// Build a vector-list tuple of 128-bit Q registers (QPair/QTriple/QQuad)
/// from 1-4 input vectors; delegates the REG_SEQUENCE work to createTuple.
443 SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
// Indexed by (Regs.size() - 2): pair, triple, quad register classes.
444 static unsigned RegClassIDs[] = { AArch64::QPairRegClassID,
445 AArch64::QTripleRegClassID,
446 AArch64::QQuadRegClassID };
447 static unsigned SubRegs[] = { AArch64::qsub_0, AArch64::qsub_1,
448 AArch64::qsub_2, AArch64::qsub_3 };
450 return createTuple(Regs, RegClassIDs, SubRegs);
/// Shared implementation for createDTuple/createQTuple: wrap 2-4 vectors in
/// a REG_SEQUENCE of the class RegClassIDs[Regs.size()-2], pairing each
/// input with its subregister index. A single input is returned unchanged.
453 SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
454 unsigned RegClassIDs[],
455 unsigned SubRegs[]) {
456 // There's no special register-class for a vector-list of 1 element: it's just
458 if (Regs.size() == 1)
461 assert(Regs.size() >= 2 && Regs.size() <= 4);
463 SDLoc DL(Regs[0].getNode());
465 SmallVector<SDValue, 4> Ops;
467 // First operand of REG_SEQUENCE is the desired RegClass.
469 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));
471 // Then we get pairs of source & subregister-position for the components.
472 for (unsigned i = 0; i < Regs.size(); ++i) {
473 Ops.push_back(Regs[i]);
474 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
// REG_SEQUENCE results are MVT::Untyped: the super-register has no EVT.
478 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
479 return SDValue(N, 0);
483 // Get the register stride update opcode of a VLD/VST instruction that
484 // is otherwise equivalent to the given fixed stride updating instruction.
// Pure opcode-to-opcode mapping: each "_fixed" post-indexed NEON load/store
// opcode maps to its "_register" (register-increment) twin. Opcodes with no
// register-update form fall through and are returned unchanged.
485 static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
// Post-index single-structure loads (LD1..LD4).
488 case AArch64::LD1WB_8B_fixed: return AArch64::LD1WB_8B_register;
489 case AArch64::LD1WB_4H_fixed: return AArch64::LD1WB_4H_register;
490 case AArch64::LD1WB_2S_fixed: return AArch64::LD1WB_2S_register;
491 case AArch64::LD1WB_1D_fixed: return AArch64::LD1WB_1D_register;
492 case AArch64::LD1WB_16B_fixed: return AArch64::LD1WB_16B_register;
493 case AArch64::LD1WB_8H_fixed: return AArch64::LD1WB_8H_register;
494 case AArch64::LD1WB_4S_fixed: return AArch64::LD1WB_4S_register;
495 case AArch64::LD1WB_2D_fixed: return AArch64::LD1WB_2D_register;
497 case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register;
498 case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register;
499 case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register;
500 case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register;
501 case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register;
502 case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register;
503 case AArch64::LD2WB_2D_fixed: return AArch64::LD2WB_2D_register;
505 case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register;
506 case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register;
507 case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register;
508 case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register;
509 case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register;
510 case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register;
511 case AArch64::LD3WB_2D_fixed: return AArch64::LD3WB_2D_register;
513 case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register;
514 case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register;
515 case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register;
516 case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register;
517 case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register;
518 case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register;
519 case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register;
// Post-index LD1 multi-register variants (ld1 {v0,v1}, etc.).
521 case AArch64::LD1x2WB_8B_fixed: return AArch64::LD1x2WB_8B_register;
522 case AArch64::LD1x2WB_4H_fixed: return AArch64::LD1x2WB_4H_register;
523 case AArch64::LD1x2WB_2S_fixed: return AArch64::LD1x2WB_2S_register;
524 case AArch64::LD1x2WB_1D_fixed: return AArch64::LD1x2WB_1D_register;
525 case AArch64::LD1x2WB_16B_fixed: return AArch64::LD1x2WB_16B_register;
526 case AArch64::LD1x2WB_8H_fixed: return AArch64::LD1x2WB_8H_register;
527 case AArch64::LD1x2WB_4S_fixed: return AArch64::LD1x2WB_4S_register;
528 case AArch64::LD1x2WB_2D_fixed: return AArch64::LD1x2WB_2D_register;
530 case AArch64::LD1x3WB_8B_fixed: return AArch64::LD1x3WB_8B_register;
531 case AArch64::LD1x3WB_4H_fixed: return AArch64::LD1x3WB_4H_register;
532 case AArch64::LD1x3WB_2S_fixed: return AArch64::LD1x3WB_2S_register;
533 case AArch64::LD1x3WB_1D_fixed: return AArch64::LD1x3WB_1D_register;
534 case AArch64::LD1x3WB_16B_fixed: return AArch64::LD1x3WB_16B_register;
535 case AArch64::LD1x3WB_8H_fixed: return AArch64::LD1x3WB_8H_register;
536 case AArch64::LD1x3WB_4S_fixed: return AArch64::LD1x3WB_4S_register;
537 case AArch64::LD1x3WB_2D_fixed: return AArch64::LD1x3WB_2D_register;
539 case AArch64::LD1x4WB_8B_fixed: return AArch64::LD1x4WB_8B_register;
540 case AArch64::LD1x4WB_4H_fixed: return AArch64::LD1x4WB_4H_register;
541 case AArch64::LD1x4WB_2S_fixed: return AArch64::LD1x4WB_2S_register;
542 case AArch64::LD1x4WB_1D_fixed: return AArch64::LD1x4WB_1D_register;
543 case AArch64::LD1x4WB_16B_fixed: return AArch64::LD1x4WB_16B_register;
544 case AArch64::LD1x4WB_8H_fixed: return AArch64::LD1x4WB_8H_register;
545 case AArch64::LD1x4WB_4S_fixed: return AArch64::LD1x4WB_4S_register;
546 case AArch64::LD1x4WB_2D_fixed: return AArch64::LD1x4WB_2D_register;
// Post-index stores (ST1..ST4 and ST1 multi-register variants).
548 case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register;
549 case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register;
550 case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register;
551 case AArch64::ST1WB_1D_fixed: return AArch64::ST1WB_1D_register;
552 case AArch64::ST1WB_16B_fixed: return AArch64::ST1WB_16B_register;
553 case AArch64::ST1WB_8H_fixed: return AArch64::ST1WB_8H_register;
554 case AArch64::ST1WB_4S_fixed: return AArch64::ST1WB_4S_register;
555 case AArch64::ST1WB_2D_fixed: return AArch64::ST1WB_2D_register;
557 case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register;
558 case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register;
559 case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register;
560 case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register;
561 case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register;
562 case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register;
563 case AArch64::ST2WB_2D_fixed: return AArch64::ST2WB_2D_register;
565 case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register;
566 case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register;
567 case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register;
568 case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register;
569 case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register;
570 case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register;
571 case AArch64::ST3WB_2D_fixed: return AArch64::ST3WB_2D_register;
573 case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register;
574 case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register;
575 case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register;
576 case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register;
577 case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register;
578 case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register;
579 case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register;
581 case AArch64::ST1x2WB_8B_fixed: return AArch64::ST1x2WB_8B_register;
582 case AArch64::ST1x2WB_4H_fixed: return AArch64::ST1x2WB_4H_register;
583 case AArch64::ST1x2WB_2S_fixed: return AArch64::ST1x2WB_2S_register;
584 case AArch64::ST1x2WB_1D_fixed: return AArch64::ST1x2WB_1D_register;
585 case AArch64::ST1x2WB_16B_fixed: return AArch64::ST1x2WB_16B_register;
586 case AArch64::ST1x2WB_8H_fixed: return AArch64::ST1x2WB_8H_register;
587 case AArch64::ST1x2WB_4S_fixed: return AArch64::ST1x2WB_4S_register;
588 case AArch64::ST1x2WB_2D_fixed: return AArch64::ST1x2WB_2D_register;
590 case AArch64::ST1x3WB_8B_fixed: return AArch64::ST1x3WB_8B_register;
591 case AArch64::ST1x3WB_4H_fixed: return AArch64::ST1x3WB_4H_register;
592 case AArch64::ST1x3WB_2S_fixed: return AArch64::ST1x3WB_2S_register;
593 case AArch64::ST1x3WB_1D_fixed: return AArch64::ST1x3WB_1D_register;
594 case AArch64::ST1x3WB_16B_fixed: return AArch64::ST1x3WB_16B_register;
595 case AArch64::ST1x3WB_8H_fixed: return AArch64::ST1x3WB_8H_register;
596 case AArch64::ST1x3WB_4S_fixed: return AArch64::ST1x3WB_4S_register;
597 case AArch64::ST1x3WB_2D_fixed: return AArch64::ST1x3WB_2D_register;
599 case AArch64::ST1x4WB_8B_fixed: return AArch64::ST1x4WB_8B_register;
600 case AArch64::ST1x4WB_4H_fixed: return AArch64::ST1x4WB_4H_register;
601 case AArch64::ST1x4WB_2S_fixed: return AArch64::ST1x4WB_2S_register;
602 case AArch64::ST1x4WB_1D_fixed: return AArch64::ST1x4WB_1D_register;
603 case AArch64::ST1x4WB_16B_fixed: return AArch64::ST1x4WB_16B_register;
604 case AArch64::ST1x4WB_8H_fixed: return AArch64::ST1x4WB_8H_register;
605 case AArch64::ST1x4WB_4S_fixed: return AArch64::ST1x4WB_4S_register;
606 case AArch64::ST1x4WB_2D_fixed: return AArch64::ST1x4WB_2D_register;
608 // Post-index of duplicate loads
609 case AArch64::LD2R_WB_8B_fixed: return AArch64::LD2R_WB_8B_register;
610 case AArch64::LD2R_WB_4H_fixed: return AArch64::LD2R_WB_4H_register;
611 case AArch64::LD2R_WB_2S_fixed: return AArch64::LD2R_WB_2S_register;
612 case AArch64::LD2R_WB_1D_fixed: return AArch64::LD2R_WB_1D_register;
613 case AArch64::LD2R_WB_16B_fixed: return AArch64::LD2R_WB_16B_register;
614 case AArch64::LD2R_WB_8H_fixed: return AArch64::LD2R_WB_8H_register;
615 case AArch64::LD2R_WB_4S_fixed: return AArch64::LD2R_WB_4S_register;
616 case AArch64::LD2R_WB_2D_fixed: return AArch64::LD2R_WB_2D_register;
618 case AArch64::LD3R_WB_8B_fixed: return AArch64::LD3R_WB_8B_register;
619 case AArch64::LD3R_WB_4H_fixed: return AArch64::LD3R_WB_4H_register;
620 case AArch64::LD3R_WB_2S_fixed: return AArch64::LD3R_WB_2S_register;
621 case AArch64::LD3R_WB_1D_fixed: return AArch64::LD3R_WB_1D_register;
622 case AArch64::LD3R_WB_16B_fixed: return AArch64::LD3R_WB_16B_register;
623 case AArch64::LD3R_WB_8H_fixed: return AArch64::LD3R_WB_8H_register;
624 case AArch64::LD3R_WB_4S_fixed: return AArch64::LD3R_WB_4S_register;
625 case AArch64::LD3R_WB_2D_fixed: return AArch64::LD3R_WB_2D_register;
627 case AArch64::LD4R_WB_8B_fixed: return AArch64::LD4R_WB_8B_register;
628 case AArch64::LD4R_WB_4H_fixed: return AArch64::LD4R_WB_4H_register;
629 case AArch64::LD4R_WB_2S_fixed: return AArch64::LD4R_WB_2S_register;
630 case AArch64::LD4R_WB_1D_fixed: return AArch64::LD4R_WB_1D_register;
631 case AArch64::LD4R_WB_16B_fixed: return AArch64::LD4R_WB_16B_register;
632 case AArch64::LD4R_WB_8H_fixed: return AArch64::LD4R_WB_8H_register;
633 case AArch64::LD4R_WB_4S_fixed: return AArch64::LD4R_WB_4S_register;
634 case AArch64::LD4R_WB_2D_fixed: return AArch64::LD4R_WB_2D_register;
636 // Post-index of lane loads
637 case AArch64::LD2LN_WB_B_fixed: return AArch64::LD2LN_WB_B_register;
638 case AArch64::LD2LN_WB_H_fixed: return AArch64::LD2LN_WB_H_register;
639 case AArch64::LD2LN_WB_S_fixed: return AArch64::LD2LN_WB_S_register;
640 case AArch64::LD2LN_WB_D_fixed: return AArch64::LD2LN_WB_D_register;
642 case AArch64::LD3LN_WB_B_fixed: return AArch64::LD3LN_WB_B_register;
643 case AArch64::LD3LN_WB_H_fixed: return AArch64::LD3LN_WB_H_register;
644 case AArch64::LD3LN_WB_S_fixed: return AArch64::LD3LN_WB_S_register;
645 case AArch64::LD3LN_WB_D_fixed: return AArch64::LD3LN_WB_D_register;
647 case AArch64::LD4LN_WB_B_fixed: return AArch64::LD4LN_WB_B_register;
648 case AArch64::LD4LN_WB_H_fixed: return AArch64::LD4LN_WB_H_register;
649 case AArch64::LD4LN_WB_S_fixed: return AArch64::LD4LN_WB_S_register;
650 case AArch64::LD4LN_WB_D_fixed: return AArch64::LD4LN_WB_D_register;
652 // Post-index of lane stores
653 case AArch64::ST2LN_WB_B_fixed: return AArch64::ST2LN_WB_B_register;
654 case AArch64::ST2LN_WB_H_fixed: return AArch64::ST2LN_WB_H_register;
655 case AArch64::ST2LN_WB_S_fixed: return AArch64::ST2LN_WB_S_register;
656 case AArch64::ST2LN_WB_D_fixed: return AArch64::ST2LN_WB_D_register;
658 case AArch64::ST3LN_WB_B_fixed: return AArch64::ST3LN_WB_B_register;
659 case AArch64::ST3LN_WB_H_fixed: return AArch64::ST3LN_WB_H_register;
660 case AArch64::ST3LN_WB_S_fixed: return AArch64::ST3LN_WB_S_register;
661 case AArch64::ST3LN_WB_D_fixed: return AArch64::ST3LN_WB_D_register;
663 case AArch64::ST4LN_WB_B_fixed: return AArch64::ST4LN_WB_B_register;
664 case AArch64::ST4LN_WB_H_fixed: return AArch64::ST4LN_WB_H_register;
665 case AArch64::ST4LN_WB_S_fixed: return AArch64::ST4LN_WB_S_register;
666 case AArch64::ST4LN_WB_D_fixed: return AArch64::ST4LN_WB_D_register;
668 return Opc; // If not one we handle, return it unchanged.
/// Select a NEON structure-load intrinsic (ld1-ld4, optionally
/// post-indexed). Opcodes is an 8-entry table indexed by element size
/// (8/16/32/64) crossed with 64- vs 128-bit vector width. For NumVecs > 1
/// the instruction yields a super-register that is split back into the
/// individual result vectors with EXTRACT_SUBREG.
671 SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating,
673 const uint16_t *Opcodes) {
674 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
676 EVT VT = N->getValueType(0);
677 unsigned OpcodeIndex;
678 bool is64BitVector = VT.is64BitVector();
// Table layout: slots 0-3 are 64-bit vectors, 4-7 are 128-bit vectors.
679 switch (VT.getScalarType().getSizeInBits()) {
680 case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
681 case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
682 case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
683 case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
684 default: llvm_unreachable("unhandled vector load type");
686 unsigned Opc = Opcodes[OpcodeIndex];
688 SmallVector<SDValue, 2> Ops;
// Updating intrinsics carry the address at operand 1; plain ones at 2.
689 unsigned AddrOpIdx = isUpdating ? 1 : 2;
690 Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
693 SDValue Inc = N->getOperand(AddrOpIdx + 1);
694 if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
695 Opc = getVLDSTRegisterUpdateOpcode(Opc);
699 Ops.push_back(N->getOperand(0)); // Push back the Chain
701 SmallVector<EVT, 3> ResTys;
702 // Push back the type of return super register
704 ResTys.push_back(VT);
705 else if (NumVecs == 3)
706 ResTys.push_back(MVT::Untyped);
708 EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
709 is64BitVector ? NumVecs : NumVecs * 2);
710 ResTys.push_back(ResTy);
714 ResTys.push_back(MVT::i64); // Type of the updated register
715 ResTys.push_back(MVT::Other); // Type of the Chain
717 SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
719 // Transfer memoperands.
720 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
721 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
722 cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
727 // If NumVecs > 1, the return result is a super register containing 2-4
728 // consecutive vector registers.
729 SDValue SuperReg = SDValue(VLd, 0);
731 unsigned Sub0 = is64BitVector ? AArch64::dsub_0 : AArch64::qsub_0;
732 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
733 ReplaceUses(SDValue(N, Vec),
734 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
735 // Update users of the Chain
736 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
738 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
/// Select a NEON structure-store intrinsic (st1-st4, optionally
/// post-indexed). Mirrors SelectVLD: the element-size/width-indexed opcode
/// table picks the instruction, and the 2-4 source vectors are packed into
/// a D- or Q-register tuple before being stored.
743 SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, bool isUpdating,
745 const uint16_t *Opcodes) {
746 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
749 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
750 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
// Updating intrinsics carry the address at operand 1; plain ones at 2.
752 unsigned AddrOpIdx = isUpdating ? 1 : 2;
753 unsigned Vec0Idx = 3;
754 EVT VT = N->getOperand(Vec0Idx).getValueType();
755 unsigned OpcodeIndex;
756 bool is64BitVector = VT.is64BitVector();
// Table layout: slots 0-3 are 64-bit vectors, 4-7 are 128-bit vectors.
757 switch (VT.getScalarType().getSizeInBits()) {
758 case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
759 case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
760 case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
761 case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
762 default: llvm_unreachable("unhandled vector store type");
764 unsigned Opc = Opcodes[OpcodeIndex];
766 SmallVector<EVT, 2> ResTys;
768 ResTys.push_back(MVT::i64);
769 ResTys.push_back(MVT::Other); // Type for the Chain
771 SmallVector<SDValue, 6> Ops;
772 Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
775 SDValue Inc = N->getOperand(AddrOpIdx + 1);
776 if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
777 Opc = getVLDSTRegisterUpdateOpcode(Opc);
// Gather the source vectors and form the vector-list super register.
781 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
782 N->op_begin() + Vec0Idx + NumVecs);
783 SDValue SrcReg = is64BitVector ? createDTuple(Regs) : createQTuple(Regs);
784 Ops.push_back(SrcReg);
786 // Push back the Chain
787 Ops.push_back(N->getOperand(0));
789 // Transfer memoperands.
790 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
791 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
/// Widen a value into a larger register via SUBREG_TO_REG, placing it in
/// the sub_64 subregister position (used to feed 64-bit vectors into the
/// 128-bit-only lane load/store instructions).
797 AArch64DAGToDAGISel::getTargetSubregToReg(int SRIdx, SDLoc DL, EVT VT, EVT VTD,
799 SDNode *Reg = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, DL,
801 CurDAG->getTargetConstant(0, MVT::i64),
803 CurDAG->getTargetConstant(AArch64::sub_64, MVT::i32));
804 return SDValue(Reg, 0);
/// Select a NEON load-and-duplicate intrinsic (ld2r/ld3r/ld4r, optionally
/// post-indexed). Structure parallels SelectVLD: pick the opcode from the
/// element-size/width table, emit the machine node, then peel the 2-4
/// result vectors out of the super register with EXTRACT_SUBREG.
807 SDNode *AArch64DAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
809 const uint16_t *Opcodes) {
810 assert(NumVecs >=2 && NumVecs <= 4 && "Load Dup NumVecs out-of-range");
813 EVT VT = N->getValueType(0);
814 unsigned OpcodeIndex;
815 bool is64BitVector = VT.is64BitVector();
// Table layout: slots 0-3 are 64-bit vectors, 4-7 are 128-bit vectors.
816 switch (VT.getScalarType().getSizeInBits()) {
817 case 8: OpcodeIndex = is64BitVector ? 0 : 4; break;
818 case 16: OpcodeIndex = is64BitVector ? 1 : 5; break;
819 case 32: OpcodeIndex = is64BitVector ? 2 : 6; break;
820 case 64: OpcodeIndex = is64BitVector ? 3 : 7; break;
821 default: llvm_unreachable("unhandled vector duplicate lane load type");
823 unsigned Opc = Opcodes[OpcodeIndex];
826 SmallVector<SDValue, 6> Ops;
827 Ops.push_back(N->getOperand(1)); // Push back the Memory Address
829 SDValue Inc = N->getOperand(2);
830 if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
831 Opc = getVLDSTRegisterUpdateOpcode(Opc);
834 Ops.push_back(N->getOperand(0)); // Push back the Chain
836 SmallVector<EVT, 3> ResTys;
837 // Push back the type of return super register
839 ResTys.push_back(MVT::Untyped);
841 EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
842 is64BitVector ? NumVecs : NumVecs * 2);
843 ResTys.push_back(ResTy);
846 ResTys.push_back(MVT::i64); // Type of the updated register
847 ResTys.push_back(MVT::Other); // Type of the Chain
848 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
850 // Transfer memoperands.
851 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
852 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
853 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
855 SuperReg = SDValue(VLdDup, 0);
856 unsigned Sub0 = is64BitVector ? AArch64::dsub_0 : AArch64::qsub_0;
857 // Update uses of each registers in super register
858 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
859 ReplaceUses(SDValue(N, Vec),
860 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
861 // Update uses of the Chain
862 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
864 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
868 // We only have 128-bit vector type of load/store lane instructions.
869 // If it is 64-bit vector, we also select it to the 128-bit instructions.
870 // Just use SUBREG_TO_REG to adapt the input to 128-bit vector and
871 // EXTRACT_SUBREG to get the 64-bit vector from the 128-bit vector output.
// Select a NEON single-lane structure load/store (LDn/STn lane).  Opcodes is
// a 4-entry table indexed by element size (B/H/S/D).  isUpdating selects the
// post-increment (writeback) forms; IsLoad distinguishes load from store.
872 SDNode *AArch64DAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
873 bool isUpdating, unsigned NumVecs,
874 const uint16_t *Opcodes) {
875 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
// Writeback nodes carry the address one operand earlier.
877 unsigned AddrOpIdx = isUpdating ? 1 : 2;
878 unsigned Vec0Idx = 3;
880 SDValue Chain = N->getOperand(0);
// NOTE(review): the line assigning this lane number ("unsigned Lane =") is
// elided in this view; Lane is used below — confirm against upstream.
882 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
883 EVT VT = N->getOperand(Vec0Idx).getValueType();
884 bool is64BitVector = VT.is64BitVector();
885 EVT VT64; // 64-bit Vector Type
// Widen a 64-bit vector type to its 128-bit counterpart (same element type,
// twice the element count).  NOTE(review): the "if (is64BitVector)" guard and
// the VT64 assignment appear to be elided here — confirm upstream.
889 VT = EVT::getVectorVT(*CurDAG->getContext(), VT.getVectorElementType(),
890 VT.getVectorNumElements() * 2);
893 unsigned OpcodeIndex;
894 switch (VT.getScalarType().getSizeInBits()) {
895 case 8: OpcodeIndex = 0; break;
896 case 16: OpcodeIndex = 1; break;
897 case 32: OpcodeIndex = 2; break;
898 case 64: OpcodeIndex = 3; break;
899 default: llvm_unreachable("unhandled vector lane load/store type");
901 unsigned Opc = Opcodes[OpcodeIndex];
903 SmallVector<EVT, 3> ResTys;
905 // Push back the type of return super register
// NOTE(review): the IsLoad / isUpdating conditionals around these pushes are
// elided in this view; confirm against upstream.
907 ResTys.push_back(MVT::Untyped);
909 EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
910 is64BitVector ? NumVecs : NumVecs * 2);
911 ResTys.push_back(ResTy);
915 ResTys.push_back(MVT::i64); // Type of the updated register
916 ResTys.push_back(MVT::Other); // Type of Chain
917 SmallVector<SDValue, 5> Ops;
918 Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address
920 SDValue Inc = N->getOperand(AddrOpIdx + 1);
// Non-constant increment needs the register-update opcode variant.
921 if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
922 Opc = getVLDSTRegisterUpdateOpcode(Opc);
926 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
927 N->op_begin() + Vec0Idx + NumVecs);
// Promote each 64-bit source vector to 128 bits via SUBREG_TO_REG so the
// (128-bit-only) lane instruction can consume the Q-register tuple.
929 for (unsigned i = 0; i < Regs.size(); i++)
930 Regs[i] = getTargetSubregToReg(AArch64::sub_64, dl, VT, VT64, Regs[i]);
931 SDValue SuperReg = createQTuple(Regs);
933 Ops.push_back(SuperReg); // Source Reg
934 SDValue LaneValue = CurDAG->getTargetConstant(Lane, MVT::i32);
935 Ops.push_back(LaneValue);
936 Ops.push_back(Chain); // Push back the Chain
938 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
// Preserve the memory operand so later passes know the access pattern.
939 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
940 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
941 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
945 // Extract the subregisters.
946 SuperReg = SDValue(VLdLn, 0);
947 unsigned Sub0 = AArch64::qsub_0;
948 // Update uses of each register in the super register
949 for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
950 SDValue SUB0 = CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg);
// For a 64-bit result, peel the low half back out of the Q register.
952 SUB0 = CurDAG->getTargetExtractSubreg(AArch64::sub_64, dl, VT64, SUB0);
954 ReplaceUses(SDValue(N, Vec), SUB0);
956 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
// Writeback case: result NumVecs+1 is the updated base register.
958 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
// Pick the TBL/TBX machine opcode for a table lookup: IsExt selects TBX
// (extension form, out-of-range indices keep the destination element) vs TBL
// (out-of-range indices produce zero); Is64Bit selects the 8b vs 16b result
// form; the 1-4 vector count selects TBL1/TBX1 .. TBL4/TBX4.
// NOTE(review): the switch-on-NumOfVec skeleton, the surrounding if/else
// structure, and the final "return Opc" are elided in this view — only the
// per-case assignments remain visible.  Confirm the dispatch upstream.
// NOTE(review): the assert message says "VST" — looks like a copy-paste from
// the store selector.
962 unsigned AArch64DAGToDAGISel::getTBLOpc(bool IsExt, bool Is64Bit,
964 assert(NumOfVec >= 1 && NumOfVec <= 4 && "VST NumVecs out-of-range");
972 Opc = Is64Bit ? AArch64::TBX1_8b : AArch64::TBX1_16b;
974 Opc = Is64Bit ? AArch64::TBL1_8b : AArch64::TBL1_16b;
978 Opc = Is64Bit ? AArch64::TBX2_8b : AArch64::TBX2_16b;
980 Opc = Is64Bit ? AArch64::TBL2_8b : AArch64::TBL2_16b;
984 Opc = Is64Bit ? AArch64::TBX3_8b : AArch64::TBX3_16b;
986 Opc = Is64Bit ? AArch64::TBL3_8b : AArch64::TBL3_16b;
990 Opc = Is64Bit ? AArch64::TBX4_8b : AArch64::TBX4_16b;
992 Opc = Is64Bit ? AArch64::TBL4_8b : AArch64::TBL4_16b;
// Select a vtbl/vtbx table-lookup intrinsic into a TBL/TBX machine node.
// NumVecs (1-4) is the number of 128-bit table vectors.  NOTE(review): the
// "bool IsExt" parameter line of the signature is elided in this view.
// NOTE(review): the assert message says "VST" — copy-paste from the store
// selector.
999 SDNode *AArch64DAGToDAGISel::SelectVTBL(SDNode *N, unsigned NumVecs,
1001 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1004 // Check the element of look up table is 64-bit or not
// vtbx carries an extra fallback operand before the table vectors.
1005 unsigned Vec0Idx = IsExt ? 2 : 1;
1006 assert(!N->getOperand(Vec0Idx + 0).getValueType().is64BitVector() &&
1007 "The element of lookup table for vtbl and vtbx must be 128-bit");
1009 // Check the return value type is 64-bit or not
1010 EVT ResVT = N->getValueType(0);
1011 bool is64BitRes = ResVT.is64BitVector();
1013 // Create new SDValue for vector list
1014 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
1015 N->op_begin() + Vec0Idx + NumVecs);
1016 SDValue TblReg = createQTuple(Regs);
1017 unsigned Opc = getTBLOpc(IsExt, is64BitRes, NumVecs);
1019 SmallVector<SDValue, 3> Ops;
// Operand 1: for TBX this is the fallback (destination) vector — confirm the
// TBL-vs-TBX operand layout against the intrinsic definitions.
1021 Ops.push_back(N->getOperand(1));
1022 Ops.push_back(TblReg);
// The index vector follows the table-vector list.
1023 Ops.push_back(N->getOperand(Vec0Idx + NumVecs));
1024 return CurDAG->getMachineNode(Opc, dl, ResVT, Ops);
// Top-level instruction-selection entry point: hand-select the node kinds the
// TableGen patterns cannot express (atomics, frame indices, constants, NEON
// multi-register loads/stores and table lookups), and fall through to the
// generated SelectCode for everything else.
1027 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
1028 // Dump information about the Node being selected
1029 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");
// Already-selected machine nodes just get their node id reset.
// NOTE(review): the early return that presumably follows is elided here.
1031 if (Node->isMachineOpcode()) {
1032 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
1033 Node->setNodeId(-1);
1037 switch (Node->getOpcode()) {
// Atomic read-modify-write operations: SelectAtomic picks the pseudo matching
// the access width (I8/I16/I32/I64).
1038 case ISD::ATOMIC_LOAD_ADD:
1039 return SelectAtomic(Node,
1040 AArch64::ATOMIC_LOAD_ADD_I8,
1041 AArch64::ATOMIC_LOAD_ADD_I16,
1042 AArch64::ATOMIC_LOAD_ADD_I32,
1043 AArch64::ATOMIC_LOAD_ADD_I64);
1044 case ISD::ATOMIC_LOAD_SUB:
1045 return SelectAtomic(Node,
1046 AArch64::ATOMIC_LOAD_SUB_I8,
1047 AArch64::ATOMIC_LOAD_SUB_I16,
1048 AArch64::ATOMIC_LOAD_SUB_I32,
1049 AArch64::ATOMIC_LOAD_SUB_I64);
1050 case ISD::ATOMIC_LOAD_AND:
1051 return SelectAtomic(Node,
1052 AArch64::ATOMIC_LOAD_AND_I8,
1053 AArch64::ATOMIC_LOAD_AND_I16,
1054 AArch64::ATOMIC_LOAD_AND_I32,
1055 AArch64::ATOMIC_LOAD_AND_I64);
1056 case ISD::ATOMIC_LOAD_OR:
1057 return SelectAtomic(Node,
1058 AArch64::ATOMIC_LOAD_OR_I8,
1059 AArch64::ATOMIC_LOAD_OR_I16,
1060 AArch64::ATOMIC_LOAD_OR_I32,
1061 AArch64::ATOMIC_LOAD_OR_I64);
1062 case ISD::ATOMIC_LOAD_XOR:
1063 return SelectAtomic(Node,
1064 AArch64::ATOMIC_LOAD_XOR_I8,
1065 AArch64::ATOMIC_LOAD_XOR_I16,
1066 AArch64::ATOMIC_LOAD_XOR_I32,
1067 AArch64::ATOMIC_LOAD_XOR_I64);
1068 case ISD::ATOMIC_LOAD_NAND:
1069 return SelectAtomic(Node,
1070 AArch64::ATOMIC_LOAD_NAND_I8,
1071 AArch64::ATOMIC_LOAD_NAND_I16,
1072 AArch64::ATOMIC_LOAD_NAND_I32,
1073 AArch64::ATOMIC_LOAD_NAND_I64);
1074 case ISD::ATOMIC_LOAD_MIN:
1075 return SelectAtomic(Node,
1076 AArch64::ATOMIC_LOAD_MIN_I8,
1077 AArch64::ATOMIC_LOAD_MIN_I16,
1078 AArch64::ATOMIC_LOAD_MIN_I32,
1079 AArch64::ATOMIC_LOAD_MIN_I64);
1080 case ISD::ATOMIC_LOAD_MAX:
1081 return SelectAtomic(Node,
1082 AArch64::ATOMIC_LOAD_MAX_I8,
1083 AArch64::ATOMIC_LOAD_MAX_I16,
1084 AArch64::ATOMIC_LOAD_MAX_I32,
1085 AArch64::ATOMIC_LOAD_MAX_I64);
1086 case ISD::ATOMIC_LOAD_UMIN:
1087 return SelectAtomic(Node,
1088 AArch64::ATOMIC_LOAD_UMIN_I8,
1089 AArch64::ATOMIC_LOAD_UMIN_I16,
1090 AArch64::ATOMIC_LOAD_UMIN_I32,
1091 AArch64::ATOMIC_LOAD_UMIN_I64);
1092 case ISD::ATOMIC_LOAD_UMAX:
1093 return SelectAtomic(Node,
1094 AArch64::ATOMIC_LOAD_UMAX_I8,
1095 AArch64::ATOMIC_LOAD_UMAX_I16,
1096 AArch64::ATOMIC_LOAD_UMAX_I32,
1097 AArch64::ATOMIC_LOAD_UMAX_I64);
1098 case ISD::ATOMIC_SWAP:
1099 return SelectAtomic(Node,
1100 AArch64::ATOMIC_SWAP_I8,
1101 AArch64::ATOMIC_SWAP_I16,
1102 AArch64::ATOMIC_SWAP_I32,
1103 AArch64::ATOMIC_SWAP_I64);
1104 case ISD::ATOMIC_CMP_SWAP:
1105 return SelectAtomic(Node,
1106 AArch64::ATOMIC_CMP_SWAP_I8,
1107 AArch64::ATOMIC_CMP_SWAP_I16,
1108 AArch64::ATOMIC_CMP_SWAP_I32,
1109 AArch64::ATOMIC_CMP_SWAP_I64);
// A frame index becomes "ADD xd, <frame base>, #0" so later passes can fold
// the offset.
1110 case ISD::FrameIndex: {
1111 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
1112 EVT PtrTy = getTargetLowering()->getPointerTy();
1113 SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
1114 return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
1115 TFI, CurDAG->getTargetConstant(0, PtrTy));
// Integer constants: prefer the zero register, then move-immediate, then a
// literal-pool load as the last resort.
1117 case ISD::Constant: {
1118 SDNode *ResNode = 0;
1119 if (cast<ConstantSDNode>(Node)->getZExtValue() == 0) {
1120 // XZR and WZR are probably even better than an actual move: most of the
1121 // time they can be folded into another instruction with *no* cost.
1123 EVT Ty = Node->getValueType(0);
1124 assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
1125 uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
1126 ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1128 Register, Ty).getNode();
1131 // Next best option is a move-immediate, see if we can do that.
1133 ResNode = TrySelectToMoveImm(Node);
1139 // If even that fails we fall back to a lit-pool entry at the moment. Future
1140 // tuning may change this to a sequence of MOVZ/MOVN/MOVK instructions.
1141 ResNode = SelectToLitPool(Node);
1142 assert(ResNode && "We need *some* way to materialise a constant");
1144 // We want to continue selection at this point since the litpool access
1145 // generated used generic nodes for simplicity.
1146 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
// FP constants not encodable as an FMOV immediate go to the literal pool.
1150 case ISD::ConstantFP: {
1151 if (A64Imms::isFPImm(cast<ConstantFPSDNode>(Node)->getValueAPF())) {
1152 // FMOV will take care of it from TableGen
1156 SDNode *ResNode = LowerToFPLitPool(Node);
1157 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
1159 // We want to continue selection at this point since the litpool access
1160 // generated used generic nodes for simplicity.
// NEON structure loads/stores with post-increment writeback (..._UPD nodes).
// Each opcode table is ordered 8B,4H,2S,1D,16B,8H,4S,2D.
1164 case AArch64ISD::NEON_LD1_UPD: {
1165 static const uint16_t Opcodes[] = {
1166 AArch64::LD1WB_8B_fixed, AArch64::LD1WB_4H_fixed,
1167 AArch64::LD1WB_2S_fixed, AArch64::LD1WB_1D_fixed,
1168 AArch64::LD1WB_16B_fixed, AArch64::LD1WB_8H_fixed,
1169 AArch64::LD1WB_4S_fixed, AArch64::LD1WB_2D_fixed
1171 return SelectVLD(Node, true, 1, Opcodes);
1173 case AArch64ISD::NEON_LD2_UPD: {
1174 static const uint16_t Opcodes[] = {
1175 AArch64::LD2WB_8B_fixed, AArch64::LD2WB_4H_fixed,
1176 AArch64::LD2WB_2S_fixed, AArch64::LD1x2WB_1D_fixed,
1177 AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed,
1178 AArch64::LD2WB_4S_fixed, AArch64::LD2WB_2D_fixed
1180 return SelectVLD(Node, true, 2, Opcodes);
1182 case AArch64ISD::NEON_LD3_UPD: {
1183 static const uint16_t Opcodes[] = {
1184 AArch64::LD3WB_8B_fixed, AArch64::LD3WB_4H_fixed,
1185 AArch64::LD3WB_2S_fixed, AArch64::LD1x3WB_1D_fixed,
1186 AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed,
1187 AArch64::LD3WB_4S_fixed, AArch64::LD3WB_2D_fixed
1189 return SelectVLD(Node, true, 3, Opcodes);
1191 case AArch64ISD::NEON_LD4_UPD: {
1192 static const uint16_t Opcodes[] = {
1193 AArch64::LD4WB_8B_fixed, AArch64::LD4WB_4H_fixed,
1194 AArch64::LD4WB_2S_fixed, AArch64::LD1x4WB_1D_fixed,
1195 AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed,
1196 AArch64::LD4WB_4S_fixed, AArch64::LD4WB_2D_fixed
1198 return SelectVLD(Node, true, 4, Opcodes);
1200 case AArch64ISD::NEON_LD1x2_UPD: {
1201 static const uint16_t Opcodes[] = {
1202 AArch64::LD1x2WB_8B_fixed, AArch64::LD1x2WB_4H_fixed,
1203 AArch64::LD1x2WB_2S_fixed, AArch64::LD1x2WB_1D_fixed,
1204 AArch64::LD1x2WB_16B_fixed, AArch64::LD1x2WB_8H_fixed,
1205 AArch64::LD1x2WB_4S_fixed, AArch64::LD1x2WB_2D_fixed
1207 return SelectVLD(Node, true, 2, Opcodes);
1209 case AArch64ISD::NEON_LD1x3_UPD: {
1210 static const uint16_t Opcodes[] = {
1211 AArch64::LD1x3WB_8B_fixed, AArch64::LD1x3WB_4H_fixed,
1212 AArch64::LD1x3WB_2S_fixed, AArch64::LD1x3WB_1D_fixed,
1213 AArch64::LD1x3WB_16B_fixed, AArch64::LD1x3WB_8H_fixed,
1214 AArch64::LD1x3WB_4S_fixed, AArch64::LD1x3WB_2D_fixed
1216 return SelectVLD(Node, true, 3, Opcodes);
1218 case AArch64ISD::NEON_LD1x4_UPD: {
1219 static const uint16_t Opcodes[] = {
1220 AArch64::LD1x4WB_8B_fixed, AArch64::LD1x4WB_4H_fixed,
1221 AArch64::LD1x4WB_2S_fixed, AArch64::LD1x4WB_1D_fixed,
1222 AArch64::LD1x4WB_16B_fixed, AArch64::LD1x4WB_8H_fixed,
1223 AArch64::LD1x4WB_4S_fixed, AArch64::LD1x4WB_2D_fixed
1225 return SelectVLD(Node, true, 4, Opcodes);
1227 case AArch64ISD::NEON_ST1_UPD: {
1228 static const uint16_t Opcodes[] = {
1229 AArch64::ST1WB_8B_fixed, AArch64::ST1WB_4H_fixed,
1230 AArch64::ST1WB_2S_fixed, AArch64::ST1WB_1D_fixed,
1231 AArch64::ST1WB_16B_fixed, AArch64::ST1WB_8H_fixed,
1232 AArch64::ST1WB_4S_fixed, AArch64::ST1WB_2D_fixed
1234 return SelectVST(Node, true, 1, Opcodes);
1236 case AArch64ISD::NEON_ST2_UPD: {
1237 static const uint16_t Opcodes[] = {
1238 AArch64::ST2WB_8B_fixed, AArch64::ST2WB_4H_fixed,
1239 AArch64::ST2WB_2S_fixed, AArch64::ST1x2WB_1D_fixed,
1240 AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed,
1241 AArch64::ST2WB_4S_fixed, AArch64::ST2WB_2D_fixed
1243 return SelectVST(Node, true, 2, Opcodes);
1245 case AArch64ISD::NEON_ST3_UPD: {
1246 static const uint16_t Opcodes[] = {
1247 AArch64::ST3WB_8B_fixed, AArch64::ST3WB_4H_fixed,
1248 AArch64::ST3WB_2S_fixed, AArch64::ST1x3WB_1D_fixed,
1249 AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed,
1250 AArch64::ST3WB_4S_fixed, AArch64::ST3WB_2D_fixed
1252 return SelectVST(Node, true, 3, Opcodes);
1254 case AArch64ISD::NEON_ST4_UPD: {
1255 static const uint16_t Opcodes[] = {
1256 AArch64::ST4WB_8B_fixed, AArch64::ST4WB_4H_fixed,
1257 AArch64::ST4WB_2S_fixed, AArch64::ST1x4WB_1D_fixed,
1258 AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed,
1259 AArch64::ST4WB_4S_fixed, AArch64::ST4WB_2D_fixed
1261 return SelectVST(Node, true, 4, Opcodes);
// Load-and-duplicate (LDnR) forms: tables ordered 8B,4H,2S,1D,16B,8H,4S,2D.
1263 case AArch64ISD::NEON_LD2DUP: {
1264 static const uint16_t Opcodes[] = {
1265 AArch64::LD2R_8B, AArch64::LD2R_4H, AArch64::LD2R_2S,
1266 AArch64::LD2R_1D, AArch64::LD2R_16B, AArch64::LD2R_8H,
1267 AArch64::LD2R_4S, AArch64::LD2R_2D
1269 return SelectVLDDup(Node, false, 2, Opcodes);
1271 case AArch64ISD::NEON_LD3DUP: {
1272 static const uint16_t Opcodes[] = {
1273 AArch64::LD3R_8B, AArch64::LD3R_4H, AArch64::LD3R_2S,
1274 AArch64::LD3R_1D, AArch64::LD3R_16B, AArch64::LD3R_8H,
1275 AArch64::LD3R_4S, AArch64::LD3R_2D
1277 return SelectVLDDup(Node, false, 3, Opcodes);
1279 case AArch64ISD::NEON_LD4DUP: {
1280 static const uint16_t Opcodes[] = {
1281 AArch64::LD4R_8B, AArch64::LD4R_4H, AArch64::LD4R_2S,
1282 AArch64::LD4R_1D, AArch64::LD4R_16B, AArch64::LD4R_8H,
1283 AArch64::LD4R_4S, AArch64::LD4R_2D
1285 return SelectVLDDup(Node, false, 4, Opcodes);
1287 case AArch64ISD::NEON_LD2DUP_UPD: {
1288 static const uint16_t Opcodes[] = {
1289 AArch64::LD2R_WB_8B_fixed, AArch64::LD2R_WB_4H_fixed,
1290 AArch64::LD2R_WB_2S_fixed, AArch64::LD2R_WB_1D_fixed,
1291 AArch64::LD2R_WB_16B_fixed, AArch64::LD2R_WB_8H_fixed,
1292 AArch64::LD2R_WB_4S_fixed, AArch64::LD2R_WB_2D_fixed
1294 return SelectVLDDup(Node, true, 2, Opcodes);
1296 case AArch64ISD::NEON_LD3DUP_UPD: {
1297 static const uint16_t Opcodes[] = {
1298 AArch64::LD3R_WB_8B_fixed, AArch64::LD3R_WB_4H_fixed,
1299 AArch64::LD3R_WB_2S_fixed, AArch64::LD3R_WB_1D_fixed,
1300 AArch64::LD3R_WB_16B_fixed, AArch64::LD3R_WB_8H_fixed,
1301 AArch64::LD3R_WB_4S_fixed, AArch64::LD3R_WB_2D_fixed
1303 return SelectVLDDup(Node, true, 3, Opcodes);
1305 case AArch64ISD::NEON_LD4DUP_UPD: {
1306 static const uint16_t Opcodes[] = {
1307 AArch64::LD4R_WB_8B_fixed, AArch64::LD4R_WB_4H_fixed,
1308 AArch64::LD4R_WB_2S_fixed, AArch64::LD4R_WB_1D_fixed,
1309 AArch64::LD4R_WB_16B_fixed, AArch64::LD4R_WB_8H_fixed,
1310 AArch64::LD4R_WB_4S_fixed, AArch64::LD4R_WB_2D_fixed
1312 return SelectVLDDup(Node, true, 4, Opcodes);
// Lane loads/stores with writeback: tables ordered by element size B,H,S,D.
1314 case AArch64ISD::NEON_LD2LN_UPD: {
1315 static const uint16_t Opcodes[] = {
1316 AArch64::LD2LN_WB_B_fixed, AArch64::LD2LN_WB_H_fixed,
1317 AArch64::LD2LN_WB_S_fixed, AArch64::LD2LN_WB_D_fixed
1319 return SelectVLDSTLane(Node, true, true, 2, Opcodes);
1321 case AArch64ISD::NEON_LD3LN_UPD: {
1322 static const uint16_t Opcodes[] = {
1323 AArch64::LD3LN_WB_B_fixed, AArch64::LD3LN_WB_H_fixed,
1324 AArch64::LD3LN_WB_S_fixed, AArch64::LD3LN_WB_D_fixed
1326 return SelectVLDSTLane(Node, true, true, 3, Opcodes);
1328 case AArch64ISD::NEON_LD4LN_UPD: {
1329 static const uint16_t Opcodes[] = {
1330 AArch64::LD4LN_WB_B_fixed, AArch64::LD4LN_WB_H_fixed,
1331 AArch64::LD4LN_WB_S_fixed, AArch64::LD4LN_WB_D_fixed
1333 return SelectVLDSTLane(Node, true, true, 4, Opcodes);
1335 case AArch64ISD::NEON_ST2LN_UPD: {
1336 static const uint16_t Opcodes[] = {
1337 AArch64::ST2LN_WB_B_fixed, AArch64::ST2LN_WB_H_fixed,
1338 AArch64::ST2LN_WB_S_fixed, AArch64::ST2LN_WB_D_fixed
1340 return SelectVLDSTLane(Node, false, true, 2, Opcodes);
1342 case AArch64ISD::NEON_ST3LN_UPD: {
1343 static const uint16_t Opcodes[] = {
1344 AArch64::ST3LN_WB_B_fixed, AArch64::ST3LN_WB_H_fixed,
1345 AArch64::ST3LN_WB_S_fixed, AArch64::ST3LN_WB_D_fixed
1347 return SelectVLDSTLane(Node, false, true, 3, Opcodes);
1349 case AArch64ISD::NEON_ST4LN_UPD: {
1350 static const uint16_t Opcodes[] = {
1351 AArch64::ST4LN_WB_B_fixed, AArch64::ST4LN_WB_H_fixed,
1352 AArch64::ST4LN_WB_S_fixed, AArch64::ST4LN_WB_D_fixed
1354 return SelectVLDSTLane(Node, false, true, 4, Opcodes);
1356 case AArch64ISD::NEON_ST1x2_UPD: {
1357 static const uint16_t Opcodes[] = {
1358 AArch64::ST1x2WB_8B_fixed, AArch64::ST1x2WB_4H_fixed,
1359 AArch64::ST1x2WB_2S_fixed, AArch64::ST1x2WB_1D_fixed,
1360 AArch64::ST1x2WB_16B_fixed, AArch64::ST1x2WB_8H_fixed,
1361 AArch64::ST1x2WB_4S_fixed, AArch64::ST1x2WB_2D_fixed
1363 return SelectVST(Node, true, 2, Opcodes);
1365 case AArch64ISD::NEON_ST1x3_UPD: {
1366 static const uint16_t Opcodes[] = {
1367 AArch64::ST1x3WB_8B_fixed, AArch64::ST1x3WB_4H_fixed,
1368 AArch64::ST1x3WB_2S_fixed, AArch64::ST1x3WB_1D_fixed,
1369 AArch64::ST1x3WB_16B_fixed, AArch64::ST1x3WB_8H_fixed,
1370 AArch64::ST1x3WB_4S_fixed, AArch64::ST1x3WB_2D_fixed
1372 return SelectVST(Node, true, 3, Opcodes);
1374 case AArch64ISD::NEON_ST1x4_UPD: {
1375 static const uint16_t Opcodes[] = {
1376 AArch64::ST1x4WB_8B_fixed, AArch64::ST1x4WB_4H_fixed,
1377 AArch64::ST1x4WB_2S_fixed, AArch64::ST1x4WB_1D_fixed,
1378 AArch64::ST1x4WB_16B_fixed, AArch64::ST1x4WB_8H_fixed,
1379 AArch64::ST1x4WB_4S_fixed, AArch64::ST1x4WB_2D_fixed
1381 return SelectVST(Node, true, 4, Opcodes);
// TBL/TBX table-lookup intrinsics.  NOTE(review): the inner switch statement
// and the line defining IsExt are elided in this view.
1383 case ISD::INTRINSIC_WO_CHAIN: {
1384 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
1389 case Intrinsic::aarch64_neon_vtbx1:
1391 case Intrinsic::aarch64_neon_vtbl1:
1392 return SelectVTBL(Node, 1, IsExt);
1393 case Intrinsic::aarch64_neon_vtbx2:
1395 case Intrinsic::aarch64_neon_vtbl2:
1396 return SelectVTBL(Node, 2, IsExt);
1397 case Intrinsic::aarch64_neon_vtbx3:
1399 case Intrinsic::aarch64_neon_vtbl3:
1400 return SelectVTBL(Node, 3, IsExt);
1401 case Intrinsic::aarch64_neon_vtbx4:
1403 case Intrinsic::aarch64_neon_vtbl4:
1404 return SelectVTBL(Node, 4, IsExt);
// Non-writeback NEON load/store intrinsics, dispatched on the intrinsic id
// (operand 1, after the chain).
1408 case ISD::INTRINSIC_VOID:
1409 case ISD::INTRINSIC_W_CHAIN: {
1410 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1414 case Intrinsic::arm_neon_vld1: {
1415 static const uint16_t Opcodes[] = {
1416 AArch64::LD1_8B, AArch64::LD1_4H, AArch64::LD1_2S, AArch64::LD1_1D,
1417 AArch64::LD1_16B, AArch64::LD1_8H, AArch64::LD1_4S, AArch64::LD1_2D
1419 return SelectVLD(Node, false, 1, Opcodes);
1421 case Intrinsic::arm_neon_vld2: {
1422 static const uint16_t Opcodes[] = {
1423 AArch64::LD2_8B, AArch64::LD2_4H, AArch64::LD2_2S, AArch64::LD1x2_1D,
1424 AArch64::LD2_16B, AArch64::LD2_8H, AArch64::LD2_4S, AArch64::LD2_2D
1426 return SelectVLD(Node, false, 2, Opcodes);
1428 case Intrinsic::arm_neon_vld3: {
1429 static const uint16_t Opcodes[] = {
1430 AArch64::LD3_8B, AArch64::LD3_4H, AArch64::LD3_2S, AArch64::LD1x3_1D,
1431 AArch64::LD3_16B, AArch64::LD3_8H, AArch64::LD3_4S, AArch64::LD3_2D
1433 return SelectVLD(Node, false, 3, Opcodes);
1435 case Intrinsic::arm_neon_vld4: {
1436 static const uint16_t Opcodes[] = {
1437 AArch64::LD4_8B, AArch64::LD4_4H, AArch64::LD4_2S, AArch64::LD1x4_1D,
1438 AArch64::LD4_16B, AArch64::LD4_8H, AArch64::LD4_4S, AArch64::LD4_2D
1440 return SelectVLD(Node, false, 4, Opcodes);
1442 case Intrinsic::aarch64_neon_vld1x2: {
1443 static const uint16_t Opcodes[] = {
1444 AArch64::LD1x2_8B, AArch64::LD1x2_4H, AArch64::LD1x2_2S,
1445 AArch64::LD1x2_1D, AArch64::LD1x2_16B, AArch64::LD1x2_8H,
1446 AArch64::LD1x2_4S, AArch64::LD1x2_2D
1448 return SelectVLD(Node, false, 2, Opcodes);
1450 case Intrinsic::aarch64_neon_vld1x3: {
1451 static const uint16_t Opcodes[] = {
1452 AArch64::LD1x3_8B, AArch64::LD1x3_4H, AArch64::LD1x3_2S,
1453 AArch64::LD1x3_1D, AArch64::LD1x3_16B, AArch64::LD1x3_8H,
1454 AArch64::LD1x3_4S, AArch64::LD1x3_2D
1456 return SelectVLD(Node, false, 3, Opcodes);
1458 case Intrinsic::aarch64_neon_vld1x4: {
1459 static const uint16_t Opcodes[] = {
1460 AArch64::LD1x4_8B, AArch64::LD1x4_4H, AArch64::LD1x4_2S,
1461 AArch64::LD1x4_1D, AArch64::LD1x4_16B, AArch64::LD1x4_8H,
1462 AArch64::LD1x4_4S, AArch64::LD1x4_2D
1464 return SelectVLD(Node, false, 4, Opcodes);
1466 case Intrinsic::arm_neon_vst1: {
1467 static const uint16_t Opcodes[] = {
1468 AArch64::ST1_8B, AArch64::ST1_4H, AArch64::ST1_2S, AArch64::ST1_1D,
1469 AArch64::ST1_16B, AArch64::ST1_8H, AArch64::ST1_4S, AArch64::ST1_2D
1471 return SelectVST(Node, false, 1, Opcodes);
1473 case Intrinsic::arm_neon_vst2: {
1474 static const uint16_t Opcodes[] = {
1475 AArch64::ST2_8B, AArch64::ST2_4H, AArch64::ST2_2S, AArch64::ST1x2_1D,
1476 AArch64::ST2_16B, AArch64::ST2_8H, AArch64::ST2_4S, AArch64::ST2_2D
1478 return SelectVST(Node, false, 2, Opcodes);
1480 case Intrinsic::arm_neon_vst3: {
1481 static const uint16_t Opcodes[] = {
1482 AArch64::ST3_8B, AArch64::ST3_4H, AArch64::ST3_2S, AArch64::ST1x3_1D,
1483 AArch64::ST3_16B, AArch64::ST3_8H, AArch64::ST3_4S, AArch64::ST3_2D
1485 return SelectVST(Node, false, 3, Opcodes);
1487 case Intrinsic::arm_neon_vst4: {
1488 static const uint16_t Opcodes[] = {
1489 AArch64::ST4_8B, AArch64::ST4_4H, AArch64::ST4_2S, AArch64::ST1x4_1D,
1490 AArch64::ST4_16B, AArch64::ST4_8H, AArch64::ST4_4S, AArch64::ST4_2D
1492 return SelectVST(Node, false, 4, Opcodes);
1494 case Intrinsic::aarch64_neon_vst1x2: {
1495 static const uint16_t Opcodes[] = {
1496 AArch64::ST1x2_8B, AArch64::ST1x2_4H, AArch64::ST1x2_2S,
1497 AArch64::ST1x2_1D, AArch64::ST1x2_16B, AArch64::ST1x2_8H,
1498 AArch64::ST1x2_4S, AArch64::ST1x2_2D
1500 return SelectVST(Node, false, 2, Opcodes);
1502 case Intrinsic::aarch64_neon_vst1x3: {
1503 static const uint16_t Opcodes[] = {
1504 AArch64::ST1x3_8B, AArch64::ST1x3_4H, AArch64::ST1x3_2S,
1505 AArch64::ST1x3_1D, AArch64::ST1x3_16B, AArch64::ST1x3_8H,
1506 AArch64::ST1x3_4S, AArch64::ST1x3_2D
1508 return SelectVST(Node, false, 3, Opcodes);
1510 case Intrinsic::aarch64_neon_vst1x4: {
1511 static const uint16_t Opcodes[] = {
1512 AArch64::ST1x4_8B, AArch64::ST1x4_4H, AArch64::ST1x4_2S,
1513 AArch64::ST1x4_1D, AArch64::ST1x4_16B, AArch64::ST1x4_8H,
1514 AArch64::ST1x4_4S, AArch64::ST1x4_2D
1516 return SelectVST(Node, false, 4, Opcodes);
1518 case Intrinsic::arm_neon_vld2lane: {
1519 static const uint16_t Opcodes[] = {
1520 AArch64::LD2LN_B, AArch64::LD2LN_H, AArch64::LD2LN_S, AArch64::LD2LN_D
1522 return SelectVLDSTLane(Node, true, false, 2, Opcodes);
1524 case Intrinsic::arm_neon_vld3lane: {
1525 static const uint16_t Opcodes[] = {
1526 AArch64::LD3LN_B, AArch64::LD3LN_H, AArch64::LD3LN_S, AArch64::LD3LN_D
1528 return SelectVLDSTLane(Node, true, false, 3, Opcodes);
1530 case Intrinsic::arm_neon_vld4lane: {
1531 static const uint16_t Opcodes[] = {
1532 AArch64::LD4LN_B, AArch64::LD4LN_H, AArch64::LD4LN_S, AArch64::LD4LN_D
1534 return SelectVLDSTLane(Node, true, false, 4, Opcodes);
1536 case Intrinsic::arm_neon_vst2lane: {
1537 static const uint16_t Opcodes[] = {
1538 AArch64::ST2LN_B, AArch64::ST2LN_H, AArch64::ST2LN_S, AArch64::ST2LN_D
1540 return SelectVLDSTLane(Node, false, false, 2, Opcodes);
1542 case Intrinsic::arm_neon_vst3lane: {
1543 static const uint16_t Opcodes[] = {
1544 AArch64::ST3LN_B, AArch64::ST3LN_H, AArch64::ST3LN_S, AArch64::ST3LN_D
1546 return SelectVLDSTLane(Node, false, false, 3, Opcodes);
1548 case Intrinsic::arm_neon_vst4lane: {
1549 static const uint16_t Opcodes[] = {
1550 AArch64::ST4LN_B, AArch64::ST4LN_H, AArch64::ST4LN_S, AArch64::ST4LN_D
1552 return SelectVLDSTLane(Node, false, false, 4, Opcodes);
1554 } // End of switch IntNo
1556 } // End of case ISD::INTRINSIC_VOID and ISD::INTRINSIC_W_CHAIN
1558 break; // Let generic code handle it
// Fall back to the TableGen-generated matcher for everything not handled
// above.
1561 SDNode *ResNode = SelectCode(Node);
1563 DEBUG(dbgs() << "=> ";
1564 if (ResNode == NULL || ResNode == Node)
1567 ResNode->dump(CurDAG);
1573 /// This pass converts a legalized DAG into an AArch64-specific DAG, ready for
1574 /// instruction scheduling.
1575 FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM,
1576 CodeGenOpt::Level OptLevel) {
// Factory entry point used by the target's pass setup; the caller owns the
// returned pass.
1577 return new AArch64DAGToDAGISel(TM, OptLevel);