1 //===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the AArch64 target.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "aarch64-isel"
16 #include "AArch64InstrInfo.h"
17 #include "AArch64Subtarget.h"
18 #include "AArch64TargetMachine.h"
19 #include "Utils/AArch64BaseInfo.h"
20 #include "llvm/ADT/APSInt.h"
21 #include "llvm/CodeGen/SelectionDAGISel.h"
22 #include "llvm/IR/GlobalValue.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/raw_ostream.h"
28 //===--------------------------------------------------------------------===//
29 /// AArch64 specific code to select AArch64 machine instructions for
30 /// SelectionDAG operations.
/// AArch64-specific SelectionDAG instruction selector: converts the legalized
/// DAG into AArch64 machine instructions, via the TableGen'erated matcher
/// (AArch64GenDAGISel.inc) plus the custom selection routines declared below.
class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {

  virtual const char *getPassName() const {
    return "AArch64 Instruction Selection";

  // Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

  /// Match a constant usable as a scaled unsigned 12-bit offset for a
  /// MemSize-byte access: it must be a multiple of MemSize and the scaled
  /// value must fit in 12 bits. On success UImm12 is the scaled offset.
  template<unsigned MemSize>
  bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) {
    const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
    if (!CN || CN->getZExtValue() % MemSize != 0
        || CN->getZExtValue() / MemSize > 0xfff)

    UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64);

  /// Template shim: forwards to the non-template overload with RegWidth bound
  /// at instantiation time (32 or 64 from the pattern).
  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);

  /// Used for pre-lowered address-reference nodes, so we already know
  /// the fields match. This operand's job is simply to add an
  /// appropriate shift operand to the MOVZ/MOVK instruction.
  template<unsigned LogShift>
  bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
    Shift = CurDAG->getTargetConstant(LogShift, MVT::i32);

  /// Match a floating-point positive zero (selected as a zero-register
  /// operand; Dummy carries no information).
  bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,

  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    std::vector<SDValue> &OutOps);

  /// Match a constant encodable as an AArch64 bitmask ("logical") immediate.
  bool SelectLogicalImm(SDValue N, SDValue &Imm);

  /// Template shim: forwards to the non-template overload with RegWidth bound.
  template<unsigned RegWidth>
  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) {
    return SelectTSTBOperand(N, FixedPos, RegWidth);

  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);

  /// Select a pseudo-expanded atomic op, picking the opcode by memory width.
  SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,

  /// Put the given constant into a pool and return a DAG which will give its
  SDValue getConstantPoolItemAddress(SDLoc DL, const Constant *CV);

  SDNode *TrySelectToMoveImm(SDNode *N);
  SDNode *LowerToFPLitPool(SDNode *Node);
  SDNode *SelectToLitPool(SDNode *N);

  SDNode* Select(SDNode*);

  /// Get the opcode for table lookup instruction
  unsigned getTBLOpc(bool IsExt, bool Is64Bit, unsigned NumOfVec);

  /// Select NEON table lookup intrinsics. NumVecs should be 1, 2, 3 or 4.
  /// IsExt is to indicate if the result will be extended with an argument.
  SDNode *SelectVTBL(SDNode *N, unsigned NumVecs, bool IsExt);

  /// Select NEON load intrinsics. NumVecs should be 1, 2, 3 or 4.
  SDNode *SelectVLD(SDNode *N, unsigned NumVecs, bool isUpdating,
                    const uint16_t *Opcode);

  /// Select NEON store intrinsics. NumVecs should be 1, 2, 3 or 4.
  SDNode *SelectVST(SDNode *N, unsigned NumVecs, bool isUpdating,
                    const uint16_t *Opcodes);

  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element that is returned
  /// unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
/// Match an FP constant that is an exact power of two, 2^1 .. 2^RegWidth,
/// as used by the fixed-point FCVT[SU] patterns. On success FixedPos is the
/// instruction's scale operand (encoded as 64 - fbits, per the line below).
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
  const ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
  if (!CN) return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an

  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using

  // fbits is between 1 and 64 in the worst-case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  // The instruction immediate encodes the scale as 64 - fbits.
  FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32);
/// Lower an inline-asm memory operand. Unrecognised constraint codes are a
/// hard error; the operand is otherwise passed through unchanged.
AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
                                                  std::vector<SDValue> &OutOps) {
  switch (ConstraintCode) {
  default: llvm_unreachable("Unrecognised AArch64 memory constraint");

  // FIXME: more freedom is actually permitted for 'm'. We can go
  // hunting for a base and an offset if we want. Of course, since
  // we don't really know how the operand is going to be used we're
  // probably restricted to the load/store pair's simm7 as an offset

  OutOps.push_back(Op);
/// Match a floating-point constant of positive zero (negative zero must not
/// match, since it has a different bit pattern).
AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
  ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
  if (!Imm || !Imm->getValueAPF().isPosZero())

  // Doesn't actually carry any information, but keeps TableGen quiet.
  Dummy = CurDAG->getTargetConstant(0, MVT::i32);
/// Match a constant encodable as an AArch64 bitmask ("logical") immediate for
/// the value's register width; Imm receives the encoded N:immr:imms bits
/// produced by A64Imms::isLogicalImm.
bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
  uint32_t RegWidth = N.getValueType().getSizeInBits();

  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) return false;

  if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits))

  Imm = CurDAG->getTargetConstant(Bits, MVT::i32);
/// Try to materialise the integer constant in Node with a single instruction:
/// MOVZ, MOVN (32- or 64-bit), or ORR-with-zero-register for logical
/// immediates. Returns null if no single-instruction form exists, letting the
/// caller fall back to a literal-pool load.
SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
  EVT DestType = Node->getValueType(0);
  unsigned DestWidth = DestType.getSizeInBits();

  uint32_t LogicalBits;

  uint64_t BitPat = cast<ConstantSDNode>(Node)->getZExtValue();
  if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) {
    // Single 16-bit chunk (possibly shifted): one MOVZ does it.
    MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii;
  } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) {
    // Bitwise-NOT of a single 16-bit chunk: one MOVN does it.
    MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii;
  } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) {
    // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we can
    // use a 32-bit instruction: "movn w0, 0xedbc".
    MOVOpcode = AArch64::MOVNwii;
  } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) {
    // Bitmask immediate: ORR the pattern into the zero register.
    MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi;
    uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR;

    return CurDAG->getMachineNode(MOVOpcode, dl, DestType,
                                  CurDAG->getRegister(ZR, DestType),
                                  CurDAG->getTargetConstant(LogicalBits, MVT::i32));

  // Can't handle it in one instruction. There's scope for permitting two (or
  // more) instructions, but that'll need more thought.

  ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType,
                                   CurDAG->getTargetConstant(UImm16, MVT::i32),
                                   CurDAG->getTargetConstant(Shift, MVT::i32));

  if (MOVType != DestType) {
    // A 32-bit MOV was used for a 64-bit destination: widen the result by
    // inserting it into the low 32 bits (sub_32) of a fresh 64-bit register.
    ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
                                     MVT::i64, MVT::i32, MVT::Other,
                                     CurDAG->getTargetConstant(0, MVT::i64),
                                     CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32));
/// Build a DAG computing the address of CV's constant-pool entry, using the
/// addressing strategy appropriate to the code model: ADRP+LO12 wrapper for
/// Small, a 4-instruction MOVZ/MOVK absolute-address sequence for Large.
AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL,
                                                const Constant *CV) {
  EVT PtrVT = getTargetLowering()->getPointerTy();

  switch (getTargetLowering()->getTargetMachine().getCodeModel()) {
  case CodeModel::Small: {
    getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
    // Page address + low-12-bits pair, folded into the target wrapper node.
    return CurDAG->getNode(
        AArch64ISD::WrapperSmall, DL, PtrVT,
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_LO12),
        CurDAG->getConstant(Alignment, MVT::i32));

  case CodeModel::Large: {
    // Materialise the full 64-bit absolute address 16 bits at a time:
    // MOVZ for bits 63-48 (G3), then MOVK for G2, G1, G0.
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVZxii, DL, PtrVT,
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
        CurDAG->getTargetConstant(3, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
        CurDAG->getTargetConstant(2, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
        CurDAG->getTargetConstant(1, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC),
        CurDAG->getTargetConstant(0, MVT::i32));
    return SDValue(LitAddr, 0);

  llvm_unreachable("Only small and large code models supported now");
/// Materialise an integer constant via a literal-pool load, shrinking 64-bit
/// values that fit in 32 bits to a 32-bit pool entry with a zero- or
/// sign-extending load.
SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
  uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
  int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
  EVT DestType = Node->getValueType(0);

  // Since we may end up loading a 64-bit constant from a 32-bit entry the
  // constant in the pool may have a different type to the eventual node.
  ISD::LoadExtType Extension;

  assert((DestType == MVT::i64 || DestType == MVT::i32)
         && "Only expect integer constants at the moment");

  if (DestType == MVT::i32) {
    // Plain 32-bit load: no extension needed.
    Extension = ISD::NON_EXTLOAD;
  } else if (UnsignedVal <= UINT32_MAX) {
    // 64-bit destination whose value fits in an unsigned 32-bit entry.
    Extension = ISD::ZEXTLOAD;
  } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) {
    // 64-bit destination whose value fits in a signed 32-bit entry.
    Extension = ISD::SEXTLOAD;
    Extension = ISD::NON_EXTLOAD;

  Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
                                                  MemType.getSizeInBits()),
  SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
  getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());

  return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
                            MachinePointerInfo::getConstantPool(), MemType,
                            /* isVolatile = */ false,
                            /* isNonTemporal = */ false,
                            Alignment).getNode();
/// Materialise a floating-point constant via a literal-pool load. The load is
/// marked invariant: constant-pool contents never change.
SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
  const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
  EVT DestType = Node->getValueType(0);

  getTargetLowering()->getDataLayout()->getABITypeAlignment(FV->getType());
  SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);

  return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
                         MachinePointerInfo::getConstantPool(),
                         /* isVolatile = */ false,
                         /* isNonTemporal = */ false,
                         /* isInvariant = */ true,
                         Alignment).getNode();
/// Match a single-bit mask for TBZ/TBNZ-style bit tests: the constant must be
/// an exact power of two whose bit index is below RegWidth. FixedPos receives
/// the tested bit's index.
AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) return false;

  uint64_t Val = CN->getZExtValue();

  if (!isPowerOf2_64(Val)) return false;

  unsigned TestedBit = Log2_64(Val);
  // Checks above should have guaranteed that we haven't lost information in
  // finding TestedBit, but it must still be in range.
  if (TestedBit >= RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64);
/// Select an atomic node to the width-appropriate target pseudo (Op8/16/32/...
/// chosen by the memory VT). The atomic ordering is appended as an extra
/// operand so later expansion can emit the right barriers.
SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
                                          unsigned Op16,unsigned Op32,
  // Mostly direct translation to the given operations, except that we preserve
  // the AtomicOrdering for use later on.
  AtomicSDNode *AN = cast<AtomicSDNode>(Node);
  EVT VT = AN->getMemoryVT();

  else if (VT == MVT::i16)
  else if (VT == MVT::i32)
  else if (VT == MVT::i64)
  llvm_unreachable("Unexpected atomic operation");

  // Copy all operands except the chain (operand 0), which moves to the end.
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 1; i < AN->getNumOperands(); ++i)
    Ops.push_back(AN->getOperand(i));

  Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
  Ops.push_back(AN->getOperand(0)); // Chain moves to the end

  return CurDAG->SelectNodeTo(Node, Op,
                              AN->getValueType(0), MVT::Other,
                              &Ops[0], Ops.size());
/// Build a D-register (64-bit) vector list: picks the DPair/DTriple/DQuad
/// class and dsub_* subregister indices, then defers to createTuple.
SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = { AArch64::DPairRegClassID,
                                    AArch64::DTripleRegClassID,
                                    AArch64::DQuadRegClassID };
  static unsigned SubRegs[] = { AArch64::dsub_0, AArch64::dsub_1,
                                AArch64::dsub_2, AArch64::dsub_3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
/// Build a Q-register (128-bit) vector list: picks the QPair/QTriple/QQuad
/// class and qsub_* subregister indices, then defers to createTuple.
SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = { AArch64::QPairRegClassID,
                                    AArch64::QTripleRegClassID,
                                    AArch64::QQuadRegClassID };
  static unsigned SubRegs[] = { AArch64::qsub_0, AArch64::qsub_1,
                                AArch64::qsub_2, AArch64::qsub_3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
/// Combine 2-4 vector registers into one REG_SEQUENCE value of the register
/// class RegClassIDs[size-2], pairing each input with its SubRegs[] index.
/// A single-register list needs no sequence and is handled up front.
SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         unsigned RegClassIDs[],
                                         unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  if (Regs.size() == 1)

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0].getNode());

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));

  CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
// Maps each post-indexed "_fixed" (immediate increment) LDn/STn/LD1xN/ST1xN
// opcode to its "_register" (register increment) twin; anything else falls
// through unchanged.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  // LD1 writeback, one register.
  case AArch64::LD1WB_8B_fixed: return AArch64::LD1WB_8B_register;
  case AArch64::LD1WB_4H_fixed: return AArch64::LD1WB_4H_register;
  case AArch64::LD1WB_2S_fixed: return AArch64::LD1WB_2S_register;
  case AArch64::LD1WB_1D_fixed: return AArch64::LD1WB_1D_register;
  case AArch64::LD1WB_16B_fixed: return AArch64::LD1WB_16B_register;
  case AArch64::LD1WB_8H_fixed: return AArch64::LD1WB_8H_register;
  case AArch64::LD1WB_4S_fixed: return AArch64::LD1WB_4S_register;
  case AArch64::LD1WB_2D_fixed: return AArch64::LD1WB_2D_register;

  // LD2 writeback (no 1D form: de-interleaving needs >=2 lanes).
  case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register;
  case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register;
  case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register;
  case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register;
  case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register;
  case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register;
  case AArch64::LD2WB_2D_fixed: return AArch64::LD2WB_2D_register;

  // LD3 writeback.
  case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register;
  case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register;
  case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register;
  case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register;
  case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register;
  case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register;
  case AArch64::LD3WB_2D_fixed: return AArch64::LD3WB_2D_register;

  // LD4 writeback.
  case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register;
  case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register;
  case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register;
  case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register;
  case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register;
  case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register;
  case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register;

  // LD1 multi-register (non-interleaving) writeback, 2 registers.
  case AArch64::LD1x2WB_8B_fixed: return AArch64::LD1x2WB_8B_register;
  case AArch64::LD1x2WB_4H_fixed: return AArch64::LD1x2WB_4H_register;
  case AArch64::LD1x2WB_2S_fixed: return AArch64::LD1x2WB_2S_register;
  case AArch64::LD1x2WB_1D_fixed: return AArch64::LD1x2WB_1D_register;
  case AArch64::LD1x2WB_16B_fixed: return AArch64::LD1x2WB_16B_register;
  case AArch64::LD1x2WB_8H_fixed: return AArch64::LD1x2WB_8H_register;
  case AArch64::LD1x2WB_4S_fixed: return AArch64::LD1x2WB_4S_register;
  case AArch64::LD1x2WB_2D_fixed: return AArch64::LD1x2WB_2D_register;

  // LD1 multi-register writeback, 3 registers.
  case AArch64::LD1x3WB_8B_fixed: return AArch64::LD1x3WB_8B_register;
  case AArch64::LD1x3WB_4H_fixed: return AArch64::LD1x3WB_4H_register;
  case AArch64::LD1x3WB_2S_fixed: return AArch64::LD1x3WB_2S_register;
  case AArch64::LD1x3WB_1D_fixed: return AArch64::LD1x3WB_1D_register;
  case AArch64::LD1x3WB_16B_fixed: return AArch64::LD1x3WB_16B_register;
  case AArch64::LD1x3WB_8H_fixed: return AArch64::LD1x3WB_8H_register;
  case AArch64::LD1x3WB_4S_fixed: return AArch64::LD1x3WB_4S_register;
  case AArch64::LD1x3WB_2D_fixed: return AArch64::LD1x3WB_2D_register;

  // LD1 multi-register writeback, 4 registers.
  case AArch64::LD1x4WB_8B_fixed: return AArch64::LD1x4WB_8B_register;
  case AArch64::LD1x4WB_4H_fixed: return AArch64::LD1x4WB_4H_register;
  case AArch64::LD1x4WB_2S_fixed: return AArch64::LD1x4WB_2S_register;
  case AArch64::LD1x4WB_1D_fixed: return AArch64::LD1x4WB_1D_register;
  case AArch64::LD1x4WB_16B_fixed: return AArch64::LD1x4WB_16B_register;
  case AArch64::LD1x4WB_8H_fixed: return AArch64::LD1x4WB_8H_register;
  case AArch64::LD1x4WB_4S_fixed: return AArch64::LD1x4WB_4S_register;
  case AArch64::LD1x4WB_2D_fixed: return AArch64::LD1x4WB_2D_register;

  // ST1 writeback, one register.
  case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register;
  case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register;
  case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register;
  case AArch64::ST1WB_1D_fixed: return AArch64::ST1WB_1D_register;
  case AArch64::ST1WB_16B_fixed: return AArch64::ST1WB_16B_register;
  case AArch64::ST1WB_8H_fixed: return AArch64::ST1WB_8H_register;
  case AArch64::ST1WB_4S_fixed: return AArch64::ST1WB_4S_register;
  case AArch64::ST1WB_2D_fixed: return AArch64::ST1WB_2D_register;

  // ST2 writeback.
  case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register;
  case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register;
  case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register;
  case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register;
  case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register;
  case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register;
  case AArch64::ST2WB_2D_fixed: return AArch64::ST2WB_2D_register;

  // ST3 writeback.
  case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register;
  case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register;
  case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register;
  case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register;
  case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register;
  case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register;
  case AArch64::ST3WB_2D_fixed: return AArch64::ST3WB_2D_register;

  // ST4 writeback.
  case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register;
  case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register;
  case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register;
  case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register;
  case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register;
  case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register;
  case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register;

  // ST1 multi-register writeback, 2 registers.
  case AArch64::ST1x2WB_8B_fixed: return AArch64::ST1x2WB_8B_register;
  case AArch64::ST1x2WB_4H_fixed: return AArch64::ST1x2WB_4H_register;
  case AArch64::ST1x2WB_2S_fixed: return AArch64::ST1x2WB_2S_register;
  case AArch64::ST1x2WB_1D_fixed: return AArch64::ST1x2WB_1D_register;
  case AArch64::ST1x2WB_16B_fixed: return AArch64::ST1x2WB_16B_register;
  case AArch64::ST1x2WB_8H_fixed: return AArch64::ST1x2WB_8H_register;
  case AArch64::ST1x2WB_4S_fixed: return AArch64::ST1x2WB_4S_register;
  case AArch64::ST1x2WB_2D_fixed: return AArch64::ST1x2WB_2D_register;

  // ST1 multi-register writeback, 3 registers.
  case AArch64::ST1x3WB_8B_fixed: return AArch64::ST1x3WB_8B_register;
  case AArch64::ST1x3WB_4H_fixed: return AArch64::ST1x3WB_4H_register;
  case AArch64::ST1x3WB_2S_fixed: return AArch64::ST1x3WB_2S_register;
  case AArch64::ST1x3WB_1D_fixed: return AArch64::ST1x3WB_1D_register;
  case AArch64::ST1x3WB_16B_fixed: return AArch64::ST1x3WB_16B_register;
  case AArch64::ST1x3WB_8H_fixed: return AArch64::ST1x3WB_8H_register;
  case AArch64::ST1x3WB_4S_fixed: return AArch64::ST1x3WB_4S_register;
  case AArch64::ST1x3WB_2D_fixed: return AArch64::ST1x3WB_2D_register;

  // ST1 multi-register writeback, 4 registers.
  case AArch64::ST1x4WB_8B_fixed: return AArch64::ST1x4WB_8B_register;
  case AArch64::ST1x4WB_4H_fixed: return AArch64::ST1x4WB_4H_register;
  case AArch64::ST1x4WB_2S_fixed: return AArch64::ST1x4WB_2S_register;
  case AArch64::ST1x4WB_1D_fixed: return AArch64::ST1x4WB_1D_register;
  case AArch64::ST1x4WB_16B_fixed: return AArch64::ST1x4WB_16B_register;
  case AArch64::ST1x4WB_8H_fixed: return AArch64::ST1x4WB_8H_register;
  case AArch64::ST1x4WB_4S_fixed: return AArch64::ST1x4WB_4S_register;
  case AArch64::ST1x4WB_2D_fixed: return AArch64::ST1x4WB_2D_register;

  return Opc; // If not one we handle, return it unchanged.
/// Select a NEON structured-load intrinsic (LDn / LD1xN) to a machine node.
/// NumVecs is 1-4; Opcodes is an 8-entry table indexed by element type
/// (8B,4H,2S,1D,16B,8H,4S,2D). With isUpdating the node carries a writeback
/// increment operand and produces an updated base register.
SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
                                       const uint16_t *Opcodes) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");

  EVT VT = N->getValueType(0);
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vector load type");
  case MVT::v8i8: OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
  case MVT::v16i8: OpcodeIndex = 4; break;
  case MVT::v8i16: OpcodeIndex = 5; break;
  case MVT::v4i32: OpcodeIndex = 6; break;
  case MVT::v2i64: OpcodeIndex = 7; break;

  unsigned Opc = Opcodes[OpcodeIndex];

  SmallVector<SDValue, 2> Ops;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address

  SDValue Inc = N->getOperand(AddrOpIdx + 1);
  // A non-constant increment means post-indexing by register, which needs the
  // "_register" variant of the opcode.
  if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
    Opc = getVLDSTRegisterUpdateOpcode(Opc);

  Ops.push_back(N->getOperand(0)); // Push back the Chain

  std::vector<EVT> ResTys;
  bool is64BitVector = VT.is64BitVector();

  ResTys.push_back(VT);
  else if (NumVecs == 3)
    ResTys.push_back(MVT::Untyped);

  // Multi-register result modelled as a wide i64 vector covering all regs.
  EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
                               is64BitVector ? NumVecs : NumVecs * 2);
  ResTys.push_back(ResTy);

  ResTys.push_back(MVT::i64); // Type of the updated register
  ResTys.push_back(MVT::Other); // Type of the Chain

  SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  // If NumVecs > 1, the return result is a super register containing 2-4
  // consecutive vector registers.
  SDValue SuperReg = SDValue(VLd, 0);

  unsigned Sub0 = is64BitVector ? AArch64::dsub_0 : AArch64::qsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  // Update users of the Chain
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
/// Select a NEON structured-store intrinsic (STn / ST1xN) to a machine node.
/// NumVecs is 1-4; Opcodes is an 8-entry table indexed by element type. The
/// source registers are gathered into a D/Q register tuple for the store.
SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
                                       const uint16_t *Opcodes) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vector store type");
  case MVT::v8i8: OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
  case MVT::v16i8: OpcodeIndex = 4; break;
  case MVT::v8i16: OpcodeIndex = 5; break;
  case MVT::v4i32: OpcodeIndex = 6; break;
  case MVT::v2i64: OpcodeIndex = 7; break;

  unsigned Opc = Opcodes[OpcodeIndex];

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::i64);
  ResTys.push_back(MVT::Other); // Type for the Chain

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(N->getOperand(AddrOpIdx)); // Push back the Memory Address

  SDValue Inc = N->getOperand(AddrOpIdx + 1);
  // A non-constant increment means post-indexing by register, which needs the
  // "_register" variant of the opcode.
  if (!isa<ConstantSDNode>(Inc.getNode())) // Increment in Register
    Opc = getVLDSTRegisterUpdateOpcode(Opc);

  bool is64BitVector = VT.is64BitVector();

  // Bundle the NumVecs source vectors into a single tuple operand.
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
                               N->op_begin() + Vec0Idx + NumVecs);
  SDValue SrcReg = is64BitVector ? createDTuple(Regs) : createQTuple(Regs);
  Ops.push_back(SrcReg);

  // Push back the Chain
  Ops.push_back(N->getOperand(0));

  // Transfer memoperands.
  SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
/// Return the TBL/TBX machine opcode for a table lookup with NumOfVec table
/// registers. IsExt selects TBX (extend/merge variant) over TBL; Is64Bit
/// selects the 64-bit (_8b) destination form over the 128-bit (_16b) one.
unsigned AArch64DAGToDAGISel::getTBLOpc(bool IsExt, bool Is64Bit,
  // NOTE(review): assert message says "VST" but this routine is the TBL/TBX
  // opcode lookup — looks like a copy-paste from SelectVST; confirm and fix
  // the message text.
  assert(NumOfVec >= 1 && NumOfVec <= 4 && "VST NumVecs out-of-range");

  Opc = Is64Bit ? AArch64::TBX1_8b : AArch64::TBX1_16b;
  Opc = Is64Bit ? AArch64::TBL1_8b : AArch64::TBL1_16b;

  Opc = Is64Bit ? AArch64::TBX2_8b : AArch64::TBX2_16b;
  Opc = Is64Bit ? AArch64::TBL2_8b : AArch64::TBL2_16b;

  Opc = Is64Bit ? AArch64::TBX3_8b : AArch64::TBX3_16b;
  Opc = Is64Bit ? AArch64::TBL3_8b : AArch64::TBL3_16b;

  Opc = Is64Bit ? AArch64::TBX4_8b : AArch64::TBX4_16b;
  Opc = Is64Bit ? AArch64::TBL4_8b : AArch64::TBL4_16b;
/// Select a NEON vtbl/vtbx intrinsic: the NumVecs 128-bit table registers are
/// packed into a Q-register tuple and fed to the TBL/TBX opcode chosen by
/// getTBLOpc. For vtbx (IsExt) the "fall-back" vector is operand 1.
SDNode *AArch64DAGToDAGISel::SelectVTBL(SDNode *N, unsigned NumVecs,
  // NOTE(review): assert message says "VST" but this selects vtbl/vtbx —
  // looks like a copy-paste; the message should mention VTBL.
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");

  // Check the element of look up table is 64-bit or not
  unsigned Vec0Idx = IsExt ? 2 : 1;
  assert(!N->getOperand(Vec0Idx + 0).getValueType().is64BitVector() &&
         "The element of lookup table for vtbl and vtbx must be 128-bit");

  // Check the return value type is 64-bit or not
  EVT ResVT = N->getValueType(0);
  bool is64BitRes = ResVT.is64BitVector();

  // Create new SDValue for vector list
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Idx,
                               N->op_begin() + Vec0Idx + NumVecs);
  SDValue TblReg = createQTuple(Regs);
  unsigned Opc = getTBLOpc(IsExt, is64BitRes, NumVecs);

  SmallVector<SDValue, 3> Ops;
  Ops.push_back(N->getOperand(1));
  Ops.push_back(TblReg);
  Ops.push_back(N->getOperand(Vec0Idx + NumVecs));
  return CurDAG->getMachineNode(Opc, dl, ResVT, Ops);
805 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
806 // Dump information about the Node being selected
807 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");
809 if (Node->isMachineOpcode()) {
810 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
815 switch (Node->getOpcode()) {
816 case ISD::ATOMIC_LOAD_ADD:
817 return SelectAtomic(Node,
818 AArch64::ATOMIC_LOAD_ADD_I8,
819 AArch64::ATOMIC_LOAD_ADD_I16,
820 AArch64::ATOMIC_LOAD_ADD_I32,
821 AArch64::ATOMIC_LOAD_ADD_I64);
822 case ISD::ATOMIC_LOAD_SUB:
823 return SelectAtomic(Node,
824 AArch64::ATOMIC_LOAD_SUB_I8,
825 AArch64::ATOMIC_LOAD_SUB_I16,
826 AArch64::ATOMIC_LOAD_SUB_I32,
827 AArch64::ATOMIC_LOAD_SUB_I64);
828 case ISD::ATOMIC_LOAD_AND:
829 return SelectAtomic(Node,
830 AArch64::ATOMIC_LOAD_AND_I8,
831 AArch64::ATOMIC_LOAD_AND_I16,
832 AArch64::ATOMIC_LOAD_AND_I32,
833 AArch64::ATOMIC_LOAD_AND_I64);
834 case ISD::ATOMIC_LOAD_OR:
835 return SelectAtomic(Node,
836 AArch64::ATOMIC_LOAD_OR_I8,
837 AArch64::ATOMIC_LOAD_OR_I16,
838 AArch64::ATOMIC_LOAD_OR_I32,
839 AArch64::ATOMIC_LOAD_OR_I64);
840 case ISD::ATOMIC_LOAD_XOR:
841 return SelectAtomic(Node,
842 AArch64::ATOMIC_LOAD_XOR_I8,
843 AArch64::ATOMIC_LOAD_XOR_I16,
844 AArch64::ATOMIC_LOAD_XOR_I32,
845 AArch64::ATOMIC_LOAD_XOR_I64);
846 case ISD::ATOMIC_LOAD_NAND:
847 return SelectAtomic(Node,
848 AArch64::ATOMIC_LOAD_NAND_I8,
849 AArch64::ATOMIC_LOAD_NAND_I16,
850 AArch64::ATOMIC_LOAD_NAND_I32,
851 AArch64::ATOMIC_LOAD_NAND_I64);
852 case ISD::ATOMIC_LOAD_MIN:
853 return SelectAtomic(Node,
854 AArch64::ATOMIC_LOAD_MIN_I8,
855 AArch64::ATOMIC_LOAD_MIN_I16,
856 AArch64::ATOMIC_LOAD_MIN_I32,
857 AArch64::ATOMIC_LOAD_MIN_I64);
858 case ISD::ATOMIC_LOAD_MAX:
859 return SelectAtomic(Node,
860 AArch64::ATOMIC_LOAD_MAX_I8,
861 AArch64::ATOMIC_LOAD_MAX_I16,
862 AArch64::ATOMIC_LOAD_MAX_I32,
863 AArch64::ATOMIC_LOAD_MAX_I64);
864 case ISD::ATOMIC_LOAD_UMIN:
865 return SelectAtomic(Node,
866 AArch64::ATOMIC_LOAD_UMIN_I8,
867 AArch64::ATOMIC_LOAD_UMIN_I16,
868 AArch64::ATOMIC_LOAD_UMIN_I32,
869 AArch64::ATOMIC_LOAD_UMIN_I64);
870 case ISD::ATOMIC_LOAD_UMAX:
871 return SelectAtomic(Node,
872 AArch64::ATOMIC_LOAD_UMAX_I8,
873 AArch64::ATOMIC_LOAD_UMAX_I16,
874 AArch64::ATOMIC_LOAD_UMAX_I32,
875 AArch64::ATOMIC_LOAD_UMAX_I64);
876 case ISD::ATOMIC_SWAP:
877 return SelectAtomic(Node,
878 AArch64::ATOMIC_SWAP_I8,
879 AArch64::ATOMIC_SWAP_I16,
880 AArch64::ATOMIC_SWAP_I32,
881 AArch64::ATOMIC_SWAP_I64);
882 case ISD::ATOMIC_CMP_SWAP:
883 return SelectAtomic(Node,
884 AArch64::ATOMIC_CMP_SWAP_I8,
885 AArch64::ATOMIC_CMP_SWAP_I16,
886 AArch64::ATOMIC_CMP_SWAP_I32,
887 AArch64::ATOMIC_CMP_SWAP_I64);
888 case ISD::FrameIndex: {
889 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
890 EVT PtrTy = getTargetLowering()->getPointerTy();
891 SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
892 return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
893 TFI, CurDAG->getTargetConstant(0, PtrTy));
895 case ISD::ConstantPool: {
896 // Constant pools are fine, just create a Target entry.
897 ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Node);
898 const Constant *C = CN->getConstVal();
899 SDValue CP = CurDAG->getTargetConstantPool(C, CN->getValueType(0));
901 ReplaceUses(SDValue(Node, 0), CP);
904 case ISD::Constant: {
906 if (cast<ConstantSDNode>(Node)->getZExtValue() == 0) {
907 // XZR and WZR are probably even better than an actual move: most of the
908 // time they can be folded into another instruction with *no* cost.
910 EVT Ty = Node->getValueType(0);
911 assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
912 uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
913 ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
915 Register, Ty).getNode();
918 // Next best option is a move-immediate, see if we can do that.
920 ResNode = TrySelectToMoveImm(Node);
926 // If even that fails we fall back to a lit-pool entry at the moment. Future
927 // tuning may change this to a sequence of MOVZ/MOVN/MOVK instructions.
928 ResNode = SelectToLitPool(Node);
929 assert(ResNode && "We need *some* way to materialise a constant");
931 // We want to continue selection at this point since the litpool access
932 // generation used generic nodes for simplicity.
933 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
937 case ISD::ConstantFP: {
938 if (A64Imms::isFPImm(cast<ConstantFPSDNode>(Node)->getValueAPF())) {
939 // FMOV will take care of it from TableGen
943 SDNode *ResNode = LowerToFPLitPool(Node);
944 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
946 // We want to continue selection at this point since the litpool access
947 // generation used generic nodes for simplicity.
951 case AArch64ISD::NEON_LD1_UPD: {
952 static const uint16_t Opcodes[] = {
953 AArch64::LD1WB_8B_fixed, AArch64::LD1WB_4H_fixed,
954 AArch64::LD1WB_2S_fixed, AArch64::LD1WB_1D_fixed,
955 AArch64::LD1WB_16B_fixed, AArch64::LD1WB_8H_fixed,
956 AArch64::LD1WB_4S_fixed, AArch64::LD1WB_2D_fixed
958 return SelectVLD(Node, 1, true, Opcodes);
960 case AArch64ISD::NEON_LD2_UPD: {
961 static const uint16_t Opcodes[] = {
962 AArch64::LD2WB_8B_fixed, AArch64::LD2WB_4H_fixed,
963 AArch64::LD2WB_2S_fixed, AArch64::LD1x2WB_1D_fixed,
964 AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed,
965 AArch64::LD2WB_4S_fixed, AArch64::LD2WB_2D_fixed
967 return SelectVLD(Node, 2, true, Opcodes);
969 case AArch64ISD::NEON_LD3_UPD: {
970 static const uint16_t Opcodes[] = {
971 AArch64::LD3WB_8B_fixed, AArch64::LD3WB_4H_fixed,
972 AArch64::LD3WB_2S_fixed, AArch64::LD1x3WB_1D_fixed,
973 AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed,
974 AArch64::LD3WB_4S_fixed, AArch64::LD3WB_2D_fixed
976 return SelectVLD(Node, 3, true, Opcodes);
978 case AArch64ISD::NEON_LD4_UPD: {
979 static const uint16_t Opcodes[] = {
980 AArch64::LD4WB_8B_fixed, AArch64::LD4WB_4H_fixed,
981 AArch64::LD4WB_2S_fixed, AArch64::LD1x4WB_1D_fixed,
982 AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed,
983 AArch64::LD4WB_4S_fixed, AArch64::LD4WB_2D_fixed
985 return SelectVLD(Node, 4, true, Opcodes);
987 case AArch64ISD::NEON_LD1x2_UPD: {
988 static const uint16_t Opcodes[] = {
989 AArch64::LD1x2WB_8B_fixed, AArch64::LD1x2WB_4H_fixed,
990 AArch64::LD1x2WB_2S_fixed, AArch64::LD1x2WB_1D_fixed,
991 AArch64::LD1x2WB_16B_fixed, AArch64::LD1x2WB_8H_fixed,
992 AArch64::LD1x2WB_4S_fixed, AArch64::LD1x2WB_2D_fixed
994 return SelectVLD(Node, 2, true, Opcodes);
996 case AArch64ISD::NEON_LD1x3_UPD: {
997 static const uint16_t Opcodes[] = {
998 AArch64::LD1x3WB_8B_fixed, AArch64::LD1x3WB_4H_fixed,
999 AArch64::LD1x3WB_2S_fixed, AArch64::LD1x3WB_1D_fixed,
1000 AArch64::LD1x3WB_16B_fixed, AArch64::LD1x3WB_8H_fixed,
1001 AArch64::LD1x3WB_4S_fixed, AArch64::LD1x3WB_2D_fixed
1003 return SelectVLD(Node, 3, true, Opcodes);
1005 case AArch64ISD::NEON_LD1x4_UPD: {
1006 static const uint16_t Opcodes[] = {
1007 AArch64::LD1x4WB_8B_fixed, AArch64::LD1x4WB_4H_fixed,
1008 AArch64::LD1x4WB_2S_fixed, AArch64::LD1x4WB_1D_fixed,
1009 AArch64::LD1x4WB_16B_fixed, AArch64::LD1x4WB_8H_fixed,
1010 AArch64::LD1x4WB_4S_fixed, AArch64::LD1x4WB_2D_fixed
1012 return SelectVLD(Node, 4, true, Opcodes);
1014 case AArch64ISD::NEON_ST1_UPD: {
1015 static const uint16_t Opcodes[] = {
1016 AArch64::ST1WB_8B_fixed, AArch64::ST1WB_4H_fixed,
1017 AArch64::ST1WB_2S_fixed, AArch64::ST1WB_1D_fixed,
1018 AArch64::ST1WB_16B_fixed, AArch64::ST1WB_8H_fixed,
1019 AArch64::ST1WB_4S_fixed, AArch64::ST1WB_2D_fixed
1021 return SelectVST(Node, 1, true, Opcodes);
1023 case AArch64ISD::NEON_ST2_UPD: {
1024 static const uint16_t Opcodes[] = {
1025 AArch64::ST2WB_8B_fixed, AArch64::ST2WB_4H_fixed,
1026 AArch64::ST2WB_2S_fixed, AArch64::ST1x2WB_1D_fixed,
1027 AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed,
1028 AArch64::ST2WB_4S_fixed, AArch64::ST2WB_2D_fixed
1030 return SelectVST(Node, 2, true, Opcodes);
1032 case AArch64ISD::NEON_ST3_UPD: {
1033 static const uint16_t Opcodes[] = {
1034 AArch64::ST3WB_8B_fixed, AArch64::ST3WB_4H_fixed,
1035 AArch64::ST3WB_2S_fixed, AArch64::ST1x3WB_1D_fixed,
1036 AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed,
1037 AArch64::ST3WB_4S_fixed, AArch64::ST3WB_2D_fixed
1039 return SelectVST(Node, 3, true, Opcodes);
1041 case AArch64ISD::NEON_ST4_UPD: {
1042 static const uint16_t Opcodes[] = {
1043 AArch64::ST4WB_8B_fixed, AArch64::ST4WB_4H_fixed,
1044 AArch64::ST4WB_2S_fixed, AArch64::ST1x4WB_1D_fixed,
1045 AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed,
1046 AArch64::ST4WB_4S_fixed, AArch64::ST4WB_2D_fixed
1048 return SelectVST(Node, 4, true, Opcodes);
1050 case AArch64ISD::NEON_ST1x2_UPD: {
1051 static const uint16_t Opcodes[] = {
1052 AArch64::ST1x2WB_8B_fixed, AArch64::ST1x2WB_4H_fixed,
1053 AArch64::ST1x2WB_2S_fixed, AArch64::ST1x2WB_1D_fixed,
1054 AArch64::ST1x2WB_16B_fixed, AArch64::ST1x2WB_8H_fixed,
1055 AArch64::ST1x2WB_4S_fixed, AArch64::ST1x2WB_2D_fixed
1057 return SelectVST(Node, 2, true, Opcodes);
1059 case AArch64ISD::NEON_ST1x3_UPD: {
1060 static const uint16_t Opcodes[] = {
1061 AArch64::ST1x3WB_8B_fixed, AArch64::ST1x3WB_4H_fixed,
1062 AArch64::ST1x3WB_2S_fixed, AArch64::ST1x3WB_1D_fixed,
1063 AArch64::ST1x3WB_16B_fixed, AArch64::ST1x3WB_8H_fixed,
1064 AArch64::ST1x3WB_4S_fixed, AArch64::ST1x3WB_2D_fixed
1066 return SelectVST(Node, 3, true, Opcodes);
1068 case AArch64ISD::NEON_ST1x4_UPD: {
1069 static const uint16_t Opcodes[] = {
1070 AArch64::ST1x4WB_8B_fixed, AArch64::ST1x4WB_4H_fixed,
1071 AArch64::ST1x4WB_2S_fixed, AArch64::ST1x4WB_1D_fixed,
1072 AArch64::ST1x4WB_16B_fixed, AArch64::ST1x4WB_8H_fixed,
1073 AArch64::ST1x4WB_4S_fixed, AArch64::ST1x4WB_2D_fixed
1075 return SelectVST(Node, 4, true, Opcodes);
1077 case ISD::INTRINSIC_WO_CHAIN: {
1078 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
1083 case Intrinsic::aarch64_neon_vtbx1:
1085 case Intrinsic::aarch64_neon_vtbl1:
1086 return SelectVTBL(Node, 1, IsExt);
1087 case Intrinsic::aarch64_neon_vtbx2:
1089 case Intrinsic::aarch64_neon_vtbl2:
1090 return SelectVTBL(Node, 2, IsExt);
1091 case Intrinsic::aarch64_neon_vtbx3:
1093 case Intrinsic::aarch64_neon_vtbl3:
1094 return SelectVTBL(Node, 3, IsExt);
1095 case Intrinsic::aarch64_neon_vtbx4:
1097 case Intrinsic::aarch64_neon_vtbl4:
1098 return SelectVTBL(Node, 4, IsExt);
1102 case ISD::INTRINSIC_VOID:
1103 case ISD::INTRINSIC_W_CHAIN: {
1104 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1109 case Intrinsic::arm_neon_vld1: {
1110 static const uint16_t Opcodes[] = { AArch64::LD1_8B, AArch64::LD1_4H,
1111 AArch64::LD1_2S, AArch64::LD1_1D,
1112 AArch64::LD1_16B, AArch64::LD1_8H,
1113 AArch64::LD1_4S, AArch64::LD1_2D };
1114 return SelectVLD(Node, 1, false, Opcodes);
1116 case Intrinsic::arm_neon_vld2: {
1117 static const uint16_t Opcodes[] = { AArch64::LD2_8B, AArch64::LD2_4H,
1118 AArch64::LD2_2S, AArch64::LD1x2_1D,
1119 AArch64::LD2_16B, AArch64::LD2_8H,
1120 AArch64::LD2_4S, AArch64::LD2_2D };
1121 return SelectVLD(Node, 2, false, Opcodes);
1123 case Intrinsic::arm_neon_vld3: {
1124 static const uint16_t Opcodes[] = { AArch64::LD3_8B, AArch64::LD3_4H,
1125 AArch64::LD3_2S, AArch64::LD1x3_1D,
1126 AArch64::LD3_16B, AArch64::LD3_8H,
1127 AArch64::LD3_4S, AArch64::LD3_2D };
1128 return SelectVLD(Node, 3, false, Opcodes);
1130 case Intrinsic::arm_neon_vld4: {
1131 static const uint16_t Opcodes[] = { AArch64::LD4_8B, AArch64::LD4_4H,
1132 AArch64::LD4_2S, AArch64::LD1x4_1D,
1133 AArch64::LD4_16B, AArch64::LD4_8H,
1134 AArch64::LD4_4S, AArch64::LD4_2D };
1135 return SelectVLD(Node, 4, false, Opcodes);
1137 case Intrinsic::aarch64_neon_vld1x2: {
1138 static const uint16_t Opcodes[] = {
1139 AArch64::LD1x2_8B, AArch64::LD1x2_4H, AArch64::LD1x2_2S,
1140 AArch64::LD1x2_1D, AArch64::LD1x2_16B, AArch64::LD1x2_8H,
1141 AArch64::LD1x2_4S, AArch64::LD1x2_2D
1143 return SelectVLD(Node, 2, false, Opcodes);
1145 case Intrinsic::aarch64_neon_vld1x3: {
1146 static const uint16_t Opcodes[] = {
1147 AArch64::LD1x3_8B, AArch64::LD1x3_4H, AArch64::LD1x3_2S,
1148 AArch64::LD1x3_1D, AArch64::LD1x3_16B, AArch64::LD1x3_8H,
1149 AArch64::LD1x3_4S, AArch64::LD1x3_2D
1151 return SelectVLD(Node, 3, false, Opcodes);
1153 case Intrinsic::aarch64_neon_vld1x4: {
1154 static const uint16_t Opcodes[] = {
1155 AArch64::LD1x4_8B, AArch64::LD1x4_4H, AArch64::LD1x4_2S,
1156 AArch64::LD1x4_1D, AArch64::LD1x4_16B, AArch64::LD1x4_8H,
1157 AArch64::LD1x4_4S, AArch64::LD1x4_2D
1159 return SelectVLD(Node, 4, false, Opcodes);
1161 case Intrinsic::arm_neon_vst1: {
1162 static const uint16_t Opcodes[] = { AArch64::ST1_8B, AArch64::ST1_4H,
1163 AArch64::ST1_2S, AArch64::ST1_1D,
1164 AArch64::ST1_16B, AArch64::ST1_8H,
1165 AArch64::ST1_4S, AArch64::ST1_2D };
1166 return SelectVST(Node, 1, false, Opcodes);
1168 case Intrinsic::arm_neon_vst2: {
1169 static const uint16_t Opcodes[] = { AArch64::ST2_8B, AArch64::ST2_4H,
1170 AArch64::ST2_2S, AArch64::ST1x2_1D,
1171 AArch64::ST2_16B, AArch64::ST2_8H,
1172 AArch64::ST2_4S, AArch64::ST2_2D };
1173 return SelectVST(Node, 2, false, Opcodes);
1175 case Intrinsic::arm_neon_vst3: {
1176 static const uint16_t Opcodes[] = { AArch64::ST3_8B, AArch64::ST3_4H,
1177 AArch64::ST3_2S, AArch64::ST1x3_1D,
1178 AArch64::ST3_16B, AArch64::ST3_8H,
1179 AArch64::ST3_4S, AArch64::ST3_2D };
1180 return SelectVST(Node, 3, false, Opcodes);
1182 case Intrinsic::arm_neon_vst4: {
1183 static const uint16_t Opcodes[] = { AArch64::ST4_8B, AArch64::ST4_4H,
1184 AArch64::ST4_2S, AArch64::ST1x4_1D,
1185 AArch64::ST4_16B, AArch64::ST4_8H,
1186 AArch64::ST4_4S, AArch64::ST4_2D };
1187 return SelectVST(Node, 4, false, Opcodes);
1189 case Intrinsic::aarch64_neon_vst1x2: {
1190 static const uint16_t Opcodes[] = {
1191 AArch64::ST1x2_8B, AArch64::ST1x2_4H, AArch64::ST1x2_2S,
1192 AArch64::ST1x2_1D, AArch64::ST1x2_16B, AArch64::ST1x2_8H,
1193 AArch64::ST1x2_4S, AArch64::ST1x2_2D
1195 return SelectVST(Node, 2, false, Opcodes);
1197 case Intrinsic::aarch64_neon_vst1x3: {
1198 static const uint16_t Opcodes[] = {
1199 AArch64::ST1x3_8B, AArch64::ST1x3_4H, AArch64::ST1x3_2S,
1200 AArch64::ST1x3_1D, AArch64::ST1x3_16B, AArch64::ST1x3_8H,
1201 AArch64::ST1x3_4S, AArch64::ST1x3_2D
1203 return SelectVST(Node, 3, false, Opcodes);
1205 case Intrinsic::aarch64_neon_vst1x4: {
1206 static const uint16_t Opcodes[] = {
1207 AArch64::ST1x4_8B, AArch64::ST1x4_4H, AArch64::ST1x4_2S,
1208 AArch64::ST1x4_1D, AArch64::ST1x4_16B, AArch64::ST1x4_8H,
1209 AArch64::ST1x4_4S, AArch64::ST1x4_2D
1211 return SelectVST(Node, 4, false, Opcodes);
1217 break; // Let generic code handle it
1220 SDNode *ResNode = SelectCode(Node);
1222 DEBUG(dbgs() << "=> ";
1223 if (ResNode == NULL || ResNode == Node)
1226 ResNode->dump(CurDAG);
1232 /// This pass converts a legalized DAG into an AArch64-specific DAG, ready for
1233 /// instruction scheduling.
1234 FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM,
1235 CodeGenOpt::Level OptLevel) {
1236 return new AArch64DAGToDAGISel(TM, OptLevel);