1 //===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the ARM target.
12 //===----------------------------------------------------------------------===//
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMTargetMachine.h"
17 #include "MCTargetDesc/ARMAddressingModes.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/SelectionDAGISel.h"
24 #include "llvm/IR/CallingConv.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Compiler.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Target/TargetLowering.h"
35 #include "llvm/Target/TargetOptions.h"
39 #define DEBUG_TYPE "arm-isel"
static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
                 cl::desc("Disable isel of shifter-op"), cl::init(false));
static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
                cl::desc("Check fp vmla / vmls hazard at isel time"),
                cl::init(true));
51 //===--------------------------------------------------------------------===//
52 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
53 /// instructions for SelectionDAG operations.
58 AM2_BASE, // Simple AM2 (+-imm12)
59 AM2_SHOP // Shifter-op AM2
62 class ARMDAGToDAGISel : public SelectionDAGISel {
63 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
64 /// make the right decision when generating code for different targets.
65 const ARMSubtarget *Subtarget;
68 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOpt::Level OptLevel)
69 : SelectionDAGISel(tm, OptLevel) {}
71 bool runOnMachineFunction(MachineFunction &MF) override {
72 // Reset the subtarget each time through.
73 Subtarget = &MF.getTarget().getSubtarget<ARMSubtarget>();
74 SelectionDAGISel::runOnMachineFunction(MF);
78 const char *getPassName() const override {
79 return "ARM Instruction Selection";
82 void PreprocessISelDAG() override;
  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
86 inline SDValue getI32Imm(unsigned Imm) {
87 return CurDAG->getTargetConstant(Imm, MVT::i32);
90 SDNode *Select(SDNode *N) override;
93 bool hasNoVMLxHazardUse(SDNode *N) const;
94 bool isShifterOpProfitable(const SDValue &Shift,
95 ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
96 bool SelectRegShifterOperand(SDValue N, SDValue &A,
97 SDValue &B, SDValue &C,
98 bool CheckProfitability = true);
99 bool SelectImmShifterOperand(SDValue N, SDValue &A,
100 SDValue &B, bool CheckProfitability = true);
101 bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
102 SDValue &B, SDValue &C) {
103 // Don't apply the profitability check
104 return SelectRegShifterOperand(N, A, B, C, false);
106 bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
108 // Don't apply the profitability check
109 return SelectImmShifterOperand(N, A, B, false);
112 bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
113 bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);
115 AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
116 SDValue &Offset, SDValue &Opc);
117 bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
119 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
122 bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
124 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
127 bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
129 SelectAddrMode2Worker(N, Base, Offset, Opc);
130 // return SelectAddrMode2ShOp(N, Base, Offset, Opc);
131 // This always matches one way or another.
135 bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
136 const ConstantSDNode *CN = cast<ConstantSDNode>(N);
137 Pred = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
138 Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
142 bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
143 SDValue &Offset, SDValue &Opc);
144 bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
145 SDValue &Offset, SDValue &Opc);
146 bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
147 SDValue &Offset, SDValue &Opc);
148 bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
149 bool SelectAddrMode3(SDValue N, SDValue &Base,
150 SDValue &Offset, SDValue &Opc);
151 bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
152 SDValue &Offset, SDValue &Opc);
153 bool SelectAddrMode5(SDValue N, SDValue &Base,
155 bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
156 bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);
158 bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
160 // Thumb Addressing Modes:
161 bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
162 bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
164 bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
165 bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
166 bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
167 bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
169 bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
171 bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
173 bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
175 bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
177 // Thumb 2 Addressing Modes:
178 bool SelectT2ShifterOperandReg(SDValue N,
179 SDValue &BaseReg, SDValue &Opc);
180 bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
181 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
183 bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
185 bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
186 SDValue &OffReg, SDValue &ShImm);
187 bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);
189 inline bool is_so_imm(unsigned Imm) const {
190 return ARM_AM::getSOImmVal(Imm) != -1;
193 inline bool is_so_imm_not(unsigned Imm) const {
194 return ARM_AM::getSOImmVal(~Imm) != -1;
197 inline bool is_t2_so_imm(unsigned Imm) const {
198 return ARM_AM::getT2SOImmVal(Imm) != -1;
201 inline bool is_t2_so_imm_not(unsigned Imm) const {
202 return ARM_AM::getT2SOImmVal(~Imm) != -1;
205 // Include the pieces autogenerated from the target description.
206 #include "ARMGenDAGISel.inc"
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
211 SDNode *SelectARMIndexedLoad(SDNode *N);
212 SDNode *SelectT2IndexedLoad(SDNode *N);
214 /// SelectVLD - Select NEON load intrinsics. NumVecs should be
215 /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
216 /// loads of D registers and even subregs and odd subregs of Q registers.
217 /// For NumVecs <= 2, QOpcodes1 is not used.
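  /// Illustrative note (inferred from the OpcodeIndex switch in SelectVLD's
  /// implementation below, not part of the original comment): the opcode
  /// arrays are indexed by element size, so index 0 selects the 8-bit form,
  /// 1 the 16-bit form, 2 the 32-bit form and 3 the 64-bit form.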
218 SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
219 const uint16_t *DOpcodes,
220 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
222 /// SelectVST - Select NEON store intrinsics. NumVecs should
223 /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
224 /// stores of D registers and even subregs and odd subregs of Q registers.
225 /// For NumVecs <= 2, QOpcodes1 is not used.
226 SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
227 const uint16_t *DOpcodes,
228 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
230 /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
231 /// be 2, 3 or 4. The opcode arrays specify the instructions used for
232 /// load/store of D registers and Q registers.
233 SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
234 bool isUpdating, unsigned NumVecs,
235 const uint16_t *DOpcodes, const uint16_t *QOpcodes);
237 /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
238 /// should be 2, 3 or 4. The opcode array specifies the instructions used
239 /// for loading D registers. (Q registers are not supported.)
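  /// Illustrative example (the assembly shown is a hypothetical use, not from
  /// this file): a 2-vector load-duplicate corresponds to something like
  /// "vld2.8 {d0[], d1[]}, [r0]", which loads one element per register and
  /// replicates it across all lanes.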
240 SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
241 const uint16_t *Opcodes);
243 /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
244 /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
245 /// generated to force the table registers to be consecutive.
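  /// Illustrative example (an assumption about why REG_SEQUENCE is needed,
  /// not stated here): a two-register table lookup such as
  /// "vtbl.8 d0, {d2, d3}, d4" requires d2/d3 to be an adjacent pair, so the
  /// table operands are glued together with a REG_SEQUENCE before the
  /// instruction is emitted.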
246 SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
248 /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
249 SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
  // Select special operations if the node forms an integer ABS pattern.
252 SDNode *SelectABSOp(SDNode *N);
254 SDNode *SelectInlineAsm(SDNode *N);
256 SDNode *SelectConcatVector(SDNode *N);
258 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
259 /// inline asm expressions.
260 bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
261 std::vector<SDValue> &OutOps) override;
263 // Form pairs of consecutive R, S, D, or Q registers.
264 SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
265 SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
266 SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
267 SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);
269 // Form sequences of 4 consecutive S, D, or Q registers.
270 SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
271 SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
272 SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
274 // Get the alignment operand for a NEON VLD or VST instruction.
275 SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so, Imm will receive the 32-bit value.
281 static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
282 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
283 Imm = cast<ConstantSDNode>(N)->getZExtValue();
// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so, Imm will receive the 32-bit value.
291 static bool isInt32Immediate(SDValue N, unsigned &Imm) {
292 return isInt32Immediate(N.getNode(), Imm);
// isOpcWithIntImmediate - This method tests to see if the node has the
// specified opcode and an immediate integer right operand. If so, Imm will
// receive the 32-bit value.
298 static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
299 return N->getOpcode() == Opc &&
300 isInt32Immediate(N->getOperand(1).getNode(), Imm);
/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in [\p RangeMin, \p RangeMax).
306 /// \param ScaledConstant [out] - On success, the pre-scaled constant value.
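///
/// Example (illustrative): a constant node holding 1020 with \p Scale == 4,
/// \p RangeMin == 0 and \p RangeMax == 256 yields ScaledConstant == 255 and
/// returns true, while 1021 fails the divisibility check and returns false.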
307 static bool isScaledConstantInRange(SDValue Node, int Scale,
308 int RangeMin, int RangeMax,
309 int &ScaledConstant) {
310 assert(Scale > 0 && "Invalid scale!");
312 // Check that this is a constant.
313 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
317 ScaledConstant = (int) C->getZExtValue();
318 if ((ScaledConstant % Scale) != 0)
321 ScaledConstant /= Scale;
322 return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
325 void ARMDAGToDAGISel::PreprocessISelDAG() {
326 if (!Subtarget->hasV6T2Ops())
329 bool isThumb2 = Subtarget->isThumb();
330 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
331 E = CurDAG->allnodes_end(); I != E; ) {
332 SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.
334 if (N->getOpcode() != ISD::ADD)
337 // Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
338 // leading zeros, followed by consecutive set bits, followed by 1 or 2
339 // trailing zeros, e.g. 1020.
340 // Transform the expression to
    // (add X1, (shl (and (srl X2, c1+tz), (c2>>tz)), tz)) where tz is the
    // number of trailing zeros of c2. The left shift would be folded as a
    // shifter operand of 'add' and the 'and' and 'srl' would become a
    // bits-extraction node (UBFX).
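    //
    // Worked example (illustrative; register names are hypothetical): with
    // c1 == 7 and c2 == 1020 (tz == 2),
    //   (add r0, (and (srl r1, 7), 1020))
    // becomes
    //   (add r0, (shl (and (srl r1, 9), 255), 2))
    // and the shl then folds into the add as a shifter operand.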
346 SDValue N0 = N->getOperand(0);
347 SDValue N1 = N->getOperand(1);
348 unsigned And_imm = 0;
349 if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
350 if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
356 // Check if the AND mask is an immediate of the form: 000.....1111111100
357 unsigned TZ = countTrailingZeros(And_imm);
358 if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. On
      // Swift, a left shifter operand of 1 or 2 is free but other amounts are
      // not.
362 // ubfx r3, r1, #16, #8
363 // ldr.w r3, [r0, r3, lsl #2]
366 // and.w r2, r9, r1, lsr #14
370 if (And_imm & (And_imm + 1))
373 // Look for (and (srl X, c1), c2).
374 SDValue Srl = N1.getOperand(0);
375 unsigned Srl_imm = 0;
376 if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
380 // Make sure first operand is not a shifter operand which would prevent
381 // folding of the left shift.
386 if (SelectT2ShifterOperandReg(N0, CPTmp0, CPTmp1))
389 if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
390 SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
394 // Now make the transformation.
395 Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
397 CurDAG->getConstant(Srl_imm+TZ, MVT::i32));
398 N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
399 Srl, CurDAG->getConstant(And_imm, MVT::i32));
400 N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
401 N1, CurDAG->getConstant(TZ, MVT::i32));
402 CurDAG->UpdateNodeOperands(N, N0, N1);
406 /// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
409 bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
410 if (OptLevel == CodeGenOpt::None)
413 if (!CheckVMLxHazard)
416 if (!Subtarget->isCortexA7() && !Subtarget->isCortexA8() &&
417 !Subtarget->isCortexA9() && !Subtarget->isSwift())
423 SDNode *Use = *N->use_begin();
424 if (Use->getOpcode() == ISD::CopyToReg)
426 if (Use->isMachineOpcode()) {
427 const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
428 CurDAG->getSubtarget().getInstrInfo());
430 const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
433 unsigned Opcode = MCID.getOpcode();
434 if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
436 // vmlx feeding into another vmlx. We actually want to unfold
437 // the use later in the MLxExpansion pass. e.g.
    // A vmla feeding a second vmla stalls about 8 cycles, and the fused
    // sequence adds up to about 18 - 19 cycles, whereas the unfolded
    // vmul / vadd / vmla sequence (stall 4 cycles) adds up to about 14 cycles.
449 return TII->isFpMLxInstruction(Opcode);
455 bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
456 ARM_AM::ShiftOpc ShOpcVal,
458 if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
460 if (Shift.hasOneUse())
463 return ShOpcVal == ARM_AM::lsl &&
464 (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
467 bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
470 bool CheckProfitability) {
471 if (DisableShifterOp)
474 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
476 // Don't match base register only case. That is matched to a separate
477 // lower complexity pattern with explicit register operand.
478 if (ShOpcVal == ARM_AM::no_shift) return false;
480 BaseReg = N.getOperand(0);
481 unsigned ShImmVal = 0;
482 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
483 if (!RHS) return false;
484 ShImmVal = RHS->getZExtValue() & 31;
485 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
490 bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
494 bool CheckProfitability) {
495 if (DisableShifterOp)
498 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
500 // Don't match base register only case. That is matched to a separate
501 // lower complexity pattern with explicit register operand.
502 if (ShOpcVal == ARM_AM::no_shift) return false;
504 BaseReg = N.getOperand(0);
505 unsigned ShImmVal = 0;
506 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
507 if (RHS) return false;
509 ShReg = N.getOperand(1);
510 if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
512 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
518 bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
521 // Match simple R + imm12 operands.
524 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
525 !CurDAG->isBaseWithConstantOffset(N)) {
526 if (N.getOpcode() == ISD::FrameIndex) {
527 // Match frame index.
528 int FI = cast<FrameIndexSDNode>(N)->getIndex();
529 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
530 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
534 if (N.getOpcode() == ARMISD::Wrapper &&
535 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
536 Base = N.getOperand(0);
539 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
543 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
544 int RHSC = (int)RHS->getSExtValue();
545 if (N.getOpcode() == ISD::SUB)
548 if (RHSC > -0x1000 && RHSC < 0x1000) { // 12 bits
549 Base = N.getOperand(0);
550 if (Base.getOpcode() == ISD::FrameIndex) {
551 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
552 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
554 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
561 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
567 bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
569 if (N.getOpcode() == ISD::MUL &&
570 ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
571 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
572 // X * [3,5,9] -> X + X * [2,4,8] etc.
573 int RHSC = (int)RHS->getZExtValue();
576 ARM_AM::AddrOpc AddSub = ARM_AM::add;
578 AddSub = ARM_AM::sub;
581 if (isPowerOf2_32(RHSC)) {
582 unsigned ShAmt = Log2_32(RHSC);
583 Base = Offset = N.getOperand(0);
584 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
593 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
594 // ISD::OR that is equivalent to an ISD::ADD.
595 !CurDAG->isBaseWithConstantOffset(N))
598 // Leave simple R +/- imm12 operands for LDRi12
599 if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
601 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
602 -0x1000+1, 0x1000, RHSC)) // 12 bits.
606 // Otherwise this is R +/- [possibly shifted] R.
607 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
608 ARM_AM::ShiftOpc ShOpcVal =
609 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
612 Base = N.getOperand(0);
613 Offset = N.getOperand(1);
615 if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
618 if (ConstantSDNode *Sh =
619 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
620 ShAmt = Sh->getZExtValue();
621 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
622 Offset = N.getOperand(1).getOperand(0);
625 ShOpcVal = ARM_AM::no_shift;
628 ShOpcVal = ARM_AM::no_shift;
632 // Try matching (R shl C) + (R).
633 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
634 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
635 N.getOperand(0).hasOneUse())) {
636 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
637 if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant; if not, we can't
      // fold it.
640 if (ConstantSDNode *Sh =
641 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
642 ShAmt = Sh->getZExtValue();
643 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
644 Offset = N.getOperand(0).getOperand(0);
645 Base = N.getOperand(1);
648 ShOpcVal = ARM_AM::no_shift;
651 ShOpcVal = ARM_AM::no_shift;
656 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
664 AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
668 if (N.getOpcode() == ISD::MUL &&
669 (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
670 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
671 // X * [3,5,9] -> X + X * [2,4,8] etc.
672 int RHSC = (int)RHS->getZExtValue();
675 ARM_AM::AddrOpc AddSub = ARM_AM::add;
677 AddSub = ARM_AM::sub;
680 if (isPowerOf2_32(RHSC)) {
681 unsigned ShAmt = Log2_32(RHSC);
682 Base = Offset = N.getOperand(0);
683 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
692 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
693 // ISD::OR that is equivalent to an ADD.
694 !CurDAG->isBaseWithConstantOffset(N)) {
696 if (N.getOpcode() == ISD::FrameIndex) {
697 int FI = cast<FrameIndexSDNode>(N)->getIndex();
698 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
699 } else if (N.getOpcode() == ARMISD::Wrapper &&
700 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
701 Base = N.getOperand(0);
703 Offset = CurDAG->getRegister(0, MVT::i32);
704 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
710 // Match simple R +/- imm12 operands.
711 if (N.getOpcode() != ISD::SUB) {
713 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
714 -0x1000+1, 0x1000, RHSC)) { // 12 bits.
715 Base = N.getOperand(0);
716 if (Base.getOpcode() == ISD::FrameIndex) {
717 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
718 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
720 Offset = CurDAG->getRegister(0, MVT::i32);
722 ARM_AM::AddrOpc AddSub = ARM_AM::add;
724 AddSub = ARM_AM::sub;
727 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
734 if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
735 // Compute R +/- (R << N) and reuse it.
737 Offset = CurDAG->getRegister(0, MVT::i32);
738 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
744 // Otherwise this is R +/- [possibly shifted] R.
745 ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
746 ARM_AM::ShiftOpc ShOpcVal =
747 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
750 Base = N.getOperand(0);
751 Offset = N.getOperand(1);
753 if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
756 if (ConstantSDNode *Sh =
757 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
758 ShAmt = Sh->getZExtValue();
759 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
760 Offset = N.getOperand(1).getOperand(0);
763 ShOpcVal = ARM_AM::no_shift;
766 ShOpcVal = ARM_AM::no_shift;
770 // Try matching (R shl C) + (R).
771 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
772 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
773 N.getOperand(0).hasOneUse())) {
774 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
775 if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant; if not, we can't
      // fold it.
778 if (ConstantSDNode *Sh =
779 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
780 ShAmt = Sh->getZExtValue();
781 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
782 Offset = N.getOperand(0).getOperand(0);
783 Base = N.getOperand(1);
786 ShOpcVal = ARM_AM::no_shift;
789 ShOpcVal = ARM_AM::no_shift;
794 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
799 bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
800 SDValue &Offset, SDValue &Opc) {
801 unsigned Opcode = Op->getOpcode();
802 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
803 ? cast<LoadSDNode>(Op)->getAddressingMode()
804 : cast<StoreSDNode>(Op)->getAddressingMode();
805 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
806 ? ARM_AM::add : ARM_AM::sub;
808 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
812 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
814 if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
817 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
818 ShAmt = Sh->getZExtValue();
819 if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
820 Offset = N.getOperand(0);
823 ShOpcVal = ARM_AM::no_shift;
826 ShOpcVal = ARM_AM::no_shift;
830 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
835 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
836 SDValue &Offset, SDValue &Opc) {
837 unsigned Opcode = Op->getOpcode();
838 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
839 ? cast<LoadSDNode>(Op)->getAddressingMode()
840 : cast<StoreSDNode>(Op)->getAddressingMode();
841 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
842 ? ARM_AM::add : ARM_AM::sub;
844 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
845 if (AddSub == ARM_AM::sub) Val *= -1;
846 Offset = CurDAG->getRegister(0, MVT::i32);
847 Opc = CurDAG->getTargetConstant(Val, MVT::i32);
855 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
856 SDValue &Offset, SDValue &Opc) {
857 unsigned Opcode = Op->getOpcode();
858 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
859 ? cast<LoadSDNode>(Op)->getAddressingMode()
860 : cast<StoreSDNode>(Op)->getAddressingMode();
861 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
862 ? ARM_AM::add : ARM_AM::sub;
864 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
865 Offset = CurDAG->getRegister(0, MVT::i32);
866 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
875 bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
880 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
881 SDValue &Base, SDValue &Offset,
883 if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C; no need to handle it here.
885 Base = N.getOperand(0);
886 Offset = N.getOperand(1);
887 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
891 if (!CurDAG->isBaseWithConstantOffset(N)) {
893 if (N.getOpcode() == ISD::FrameIndex) {
894 int FI = cast<FrameIndexSDNode>(N)->getIndex();
895 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
897 Offset = CurDAG->getRegister(0, MVT::i32);
898 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
902 // If the RHS is +/- imm8, fold into addr mode.
904 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
905 -256 + 1, 256, RHSC)) { // 8 bits.
906 Base = N.getOperand(0);
907 if (Base.getOpcode() == ISD::FrameIndex) {
908 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
909 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
911 Offset = CurDAG->getRegister(0, MVT::i32);
913 ARM_AM::AddrOpc AddSub = ARM_AM::add;
915 AddSub = ARM_AM::sub;
918 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
922 Base = N.getOperand(0);
923 Offset = N.getOperand(1);
924 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
928 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
929 SDValue &Offset, SDValue &Opc) {
930 unsigned Opcode = Op->getOpcode();
931 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
932 ? cast<LoadSDNode>(Op)->getAddressingMode()
933 : cast<StoreSDNode>(Op)->getAddressingMode();
934 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
935 ? ARM_AM::add : ARM_AM::sub;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
938 Offset = CurDAG->getRegister(0, MVT::i32);
939 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
944 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
948 bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
949 SDValue &Base, SDValue &Offset) {
950 if (!CurDAG->isBaseWithConstantOffset(N)) {
952 if (N.getOpcode() == ISD::FrameIndex) {
953 int FI = cast<FrameIndexSDNode>(N)->getIndex();
954 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
955 } else if (N.getOpcode() == ARMISD::Wrapper &&
956 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
957 Base = N.getOperand(0);
959 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
964 // If the RHS is +/- imm8, fold into addr mode.
966 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
967 -256 + 1, 256, RHSC)) {
968 Base = N.getOperand(0);
969 if (Base.getOpcode() == ISD::FrameIndex) {
970 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
971 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
974 ARM_AM::AddrOpc AddSub = ARM_AM::add;
976 AddSub = ARM_AM::sub;
979 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
985 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
990 bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
994 unsigned Alignment = 0;
996 MemSDNode *MemN = cast<MemSDNode>(Parent);
998 if (isa<LSBaseSDNode>(MemN) ||
999 ((MemN->getOpcode() == ARMISD::VST1_UPD ||
1000 MemN->getOpcode() == ARMISD::VLD1_UPD) &&
1001 MemN->getConstantOperandVal(MemN->getNumOperands() - 1) == 1)) {
1002 // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
1003 // The maximum alignment is equal to the memory size being referenced.
1004 unsigned MMOAlign = MemN->getAlignment();
1005 unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8;
1006 if (MMOAlign >= MemSize && MemSize > 1)
1007 Alignment = MemSize;
1009 // All other uses of addrmode6 are for intrinsics. For now just record
1010 // the raw alignment value; it will be refined later based on the legal
1011 // alignment operands for the intrinsic.
1012 Alignment = MemN->getAlignment();
1015 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
1019 bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
1021 LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
1022 ISD::MemIndexedMode AM = LdSt->getAddressingMode();
1023 if (AM != ISD::POST_INC)
1026 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
1027 if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
1028 Offset = CurDAG->getRegister(0, MVT::i32);
1033 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
1034 SDValue &Offset, SDValue &Label) {
1035 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
1036 Offset = N.getOperand(0);
1037 SDValue N1 = N.getOperand(1);
1038 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
1047 //===----------------------------------------------------------------------===//
1048 // Thumb Addressing Modes
1049 //===----------------------------------------------------------------------===//
1051 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
1052 SDValue &Base, SDValue &Offset){
1053 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
1054 ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
1055 if (!NC || !NC->isNullValue())
1062 Base = N.getOperand(0);
1063 Offset = N.getOperand(1);
1068 ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
1069 SDValue &Offset, unsigned Scale) {
1071 SDValue TmpBase, TmpOffImm;
1072 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1073 return false; // We want to select tLDRspi / tSTRspi instead.
1075 if (N.getOpcode() == ARMISD::Wrapper &&
1076 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1077 return false; // We want to select tLDRpci instead.
1080 if (!CurDAG->isBaseWithConstantOffset(N))
1083 // Thumb does not have [sp, r] address mode.
1084 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1085 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1086 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1087 (RHSR && RHSR->getReg() == ARM::SP))
  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
1094 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
1097 Base = N.getOperand(0);
1098 Offset = N.getOperand(1);
1103 ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
1106 return SelectThumbAddrModeRI(N, Base, Offset, 1);
1110 ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
1113 return SelectThumbAddrModeRI(N, Base, Offset, 2);
1117 ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
1120 return SelectThumbAddrModeRI(N, Base, Offset, 4);
1124 ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
1125 SDValue &Base, SDValue &OffImm) {
1127 SDValue TmpBase, TmpOffImm;
1128 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1129 return false; // We want to select tLDRspi / tSTRspi instead.
1131 if (N.getOpcode() == ARMISD::Wrapper &&
1132 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1133 return false; // We want to select tLDRpci instead.
1136 if (!CurDAG->isBaseWithConstantOffset(N)) {
1137 if (N.getOpcode() == ARMISD::Wrapper &&
1138 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
1139 Base = N.getOperand(0);
1144 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1148 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1149 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1150 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1151 (RHSR && RHSR->getReg() == ARM::SP)) {
1152 ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
1153 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1154 unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
1155 unsigned RHSC = RHS ? RHS->getZExtValue() : 0;
1157 // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
1158 if (LHSC != 0 || RHSC != 0) return false;
1161 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1165 // If the RHS is + imm5 * scale, fold into addr mode.
1167 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
1168 Base = N.getOperand(0);
1169 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1173 Base = N.getOperand(0);
1174 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1179 ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
1181 return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
1185 ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
1187 return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
1191 ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
1193 return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
1196 bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
1197 SDValue &Base, SDValue &OffImm) {
1198 if (N.getOpcode() == ISD::FrameIndex) {
1199 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1200 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1201 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1205 if (!CurDAG->isBaseWithConstantOffset(N))
1208 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1209 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
1210 (LHSR && LHSR->getReg() == ARM::SP)) {
1211 // If the RHS is + imm8 * scale, fold into addr mode.
1213 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
1214 Base = N.getOperand(0);
1215 if (Base.getOpcode() == ISD::FrameIndex) {
1216 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1217 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1219 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1228 //===----------------------------------------------------------------------===//
1229 // Thumb 2 Addressing Modes
1230 //===----------------------------------------------------------------------===//
1233 bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
1235 if (DisableShifterOp)
1238 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
1240 // Don't match base register only case. That is matched to a separate
1241 // lower complexity pattern with explicit register operand.
1242 if (ShOpcVal == ARM_AM::no_shift) return false;
1244 BaseReg = N.getOperand(0);
1245 unsigned ShImmVal = 0;
1246 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1247 ShImmVal = RHS->getZExtValue() & 31;
1248 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
1255 bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
1256 SDValue &Base, SDValue &OffImm) {
1257 // Match simple R + imm12 operands.
1260 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1261 !CurDAG->isBaseWithConstantOffset(N)) {
1262 if (N.getOpcode() == ISD::FrameIndex) {
1263 // Match frame index.
1264 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1265 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1266 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1270 if (N.getOpcode() == ARMISD::Wrapper &&
1271 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
1272 Base = N.getOperand(0);
1273 if (Base.getOpcode() == ISD::TargetConstantPool)
1274 return false; // We want to select t2LDRpci instead.
1277 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1281 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1282 if (SelectT2AddrModeImm8(N, Base, OffImm))
1283 // Let t2LDRi8 handle (R - imm8).
1286 int RHSC = (int)RHS->getZExtValue();
1287 if (N.getOpcode() == ISD::SUB)
1290 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
1291 Base = N.getOperand(0);
1292 if (Base.getOpcode() == ISD::FrameIndex) {
1293 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1294 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1296 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1303 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1307 bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
1308 SDValue &Base, SDValue &OffImm) {
1309 // Match simple R - imm8 operands.
1310 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1311 !CurDAG->isBaseWithConstantOffset(N))
1314 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1315 int RHSC = (int)RHS->getSExtValue();
1316 if (N.getOpcode() == ISD::SUB)
1319 if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
1320 Base = N.getOperand(0);
1321 if (Base.getOpcode() == ISD::FrameIndex) {
1322 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1323 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1325 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1333 bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
1335 unsigned Opcode = Op->getOpcode();
1336 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
1337 ? cast<LoadSDNode>(Op)->getAddressingMode()
1338 : cast<StoreSDNode>(Op)->getAddressingMode();
1340 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
1341 OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1342 ? CurDAG->getTargetConstant(RHSC, MVT::i32)
1343 : CurDAG->getTargetConstant(-RHSC, MVT::i32);
1350 bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
1352 SDValue &OffReg, SDValue &ShImm) {
1353 // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
1354 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
1357 // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
1358 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1359 int RHSC = (int)RHS->getZExtValue();
1360 if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
1362 else if (RHSC < 0 && RHSC >= -255) // 8 bits
1366 // Look for (R + R) or (R + (R << [1,2,3])).
1368 Base = N.getOperand(0);
1369 OffReg = N.getOperand(1);
1371 // Swap if it is ((R << c) + R).
1372 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
1373 if (ShOpcVal != ARM_AM::lsl) {
1374 ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
1375 if (ShOpcVal == ARM_AM::lsl)
1376 std::swap(Base, OffReg);
1379 if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
1382 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
1383 ShAmt = Sh->getZExtValue();
1384 if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
1385 OffReg = OffReg.getOperand(0);
1392 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
1397 bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
  // This *must* succeed since it's used for the irreplaceable ldrex and strex
  // instructions.
1402 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1404 if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
1407 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1411 uint32_t RHSC = (int)RHS->getZExtValue();
1412 if (RHSC > 1020 || RHSC % 4 != 0)
1415 Base = N.getOperand(0);
1416 if (Base.getOpcode() == ISD::FrameIndex) {
1417 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1418 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
1421 OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32);
1425 //===--------------------------------------------------------------------===//
/// getAL - Returns an ARMCC::AL immediate node.
1428 static inline SDValue getAL(SelectionDAG *CurDAG) {
1429 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
1432 SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
1433 LoadSDNode *LD = cast<LoadSDNode>(N);
1434 ISD::MemIndexedMode AM = LD->getAddressingMode();
1435 if (AM == ISD::UNINDEXED)
1438 EVT LoadedVT = LD->getMemoryVT();
1439 SDValue Offset, AMOpc;
1440 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1441 unsigned Opcode = 0;
1443 if (LoadedVT == MVT::i32 && isPre &&
1444 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1445 Opcode = ARM::LDR_PRE_IMM;
1447 } else if (LoadedVT == MVT::i32 && !isPre &&
1448 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1449 Opcode = ARM::LDR_POST_IMM;
1451 } else if (LoadedVT == MVT::i32 &&
1452 SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1453 Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
1456 } else if (LoadedVT == MVT::i16 &&
1457 SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1459 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
1460 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
1461 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
1462 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
1463 if (LD->getExtensionType() == ISD::SEXTLOAD) {
1464 if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1466 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
1470 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1472 Opcode = ARM::LDRB_PRE_IMM;
1473 } else if (!isPre &&
1474 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1476 Opcode = ARM::LDRB_POST_IMM;
1477 } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1479 Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
1485 if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
1486 SDValue Chain = LD->getChain();
1487 SDValue Base = LD->getBasePtr();
1488 SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
1489 CurDAG->getRegister(0, MVT::i32), Chain };
1490 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1491 MVT::i32, MVT::Other, Ops);
1493 SDValue Chain = LD->getChain();
1494 SDValue Base = LD->getBasePtr();
1495 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
1496 CurDAG->getRegister(0, MVT::i32), Chain };
1497 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1498 MVT::i32, MVT::Other, Ops);
1505 SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
1506 LoadSDNode *LD = cast<LoadSDNode>(N);
1507 ISD::MemIndexedMode AM = LD->getAddressingMode();
1508 if (AM == ISD::UNINDEXED)
1511 EVT LoadedVT = LD->getMemoryVT();
1512 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1514 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1515 unsigned Opcode = 0;
1517 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1518 switch (LoadedVT.getSimpleVT().SimpleTy) {
1520 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1524 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1526 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1531 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1533 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1542 SDValue Chain = LD->getChain();
1543 SDValue Base = LD->getBasePtr();
1544 SDValue Ops[]= { Base, Offset, getAL(CurDAG),
1545 CurDAG->getRegister(0, MVT::i32), Chain };
1546 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1553 /// \brief Form a GPRPair pseudo register from a pair of GPR regs.
1554 SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
1555 SDLoc dl(V0.getNode());
1557 CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
1558 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
1559 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
1560 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1561 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1564 /// \brief Form a D register from a pair of S registers.
1565 SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1566 SDLoc dl(V0.getNode());
1568 CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
1569 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1570 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1571 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1572 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1575 /// \brief Form a quad register from a pair of D registers.
1576 SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1577 SDLoc dl(V0.getNode());
1578 SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
1579 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1580 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1581 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1582 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1585 /// \brief Form 4 consecutive D registers from a pair of Q registers.
1586 SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1587 SDLoc dl(V0.getNode());
1588 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1589 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1590 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1591 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1592 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1595 /// \brief Form 4 consecutive S registers.
1596 SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
1597 SDValue V2, SDValue V3) {
1598 SDLoc dl(V0.getNode());
1600 CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
1601 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1602 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1603 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
1604 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
1605 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1606 V2, SubReg2, V3, SubReg3 };
1607 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1610 /// \brief Form 4 consecutive D registers.
1611 SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
1612 SDValue V2, SDValue V3) {
1613 SDLoc dl(V0.getNode());
1614 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1615 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1616 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1617 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
1618 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
1619 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1620 V2, SubReg2, V3, SubReg3 };
1621 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1624 /// \brief Form 4 consecutive Q registers.
1625 SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
1626 SDValue V2, SDValue V3) {
1627 SDLoc dl(V0.getNode());
1628 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
1629 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1630 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1631 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
1632 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
1633 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1634 V2, SubReg2, V3, SubReg3 };
1635 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1638 /// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1639 /// of a NEON VLD or VST instruction. The supported values depend on the
1640 /// number of registers being loaded.
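///
/// Example (illustrative): with four registers being loaded and an alignment
/// operand of 64, the result is clamped to 32; with two registers and
/// alignment 8 it stays 8; anything below 8 is encoded as 0 (no alignment).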
1641 SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
1642 bool is64BitVector) {
1643 unsigned NumRegs = NumVecs;
1644 if (!is64BitVector && NumVecs < 3)
1647 unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1648 if (Alignment >= 32 && NumRegs == 4)
1650 else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
1652 else if (Alignment >= 8)
1657 return CurDAG->getTargetConstant(Alignment, MVT::i32);
1660 static bool isVLDfixed(unsigned Opc)
1663 default: return false;
1664 case ARM::VLD1d8wb_fixed : return true;
1665 case ARM::VLD1d16wb_fixed : return true;
1666 case ARM::VLD1d64Qwb_fixed : return true;
1667 case ARM::VLD1d32wb_fixed : return true;
1668 case ARM::VLD1d64wb_fixed : return true;
1669 case ARM::VLD1d64TPseudoWB_fixed : return true;
1670 case ARM::VLD1d64QPseudoWB_fixed : return true;
1671 case ARM::VLD1q8wb_fixed : return true;
1672 case ARM::VLD1q16wb_fixed : return true;
1673 case ARM::VLD1q32wb_fixed : return true;
1674 case ARM::VLD1q64wb_fixed : return true;
1675 case ARM::VLD2d8wb_fixed : return true;
1676 case ARM::VLD2d16wb_fixed : return true;
1677 case ARM::VLD2d32wb_fixed : return true;
1678 case ARM::VLD2q8PseudoWB_fixed : return true;
1679 case ARM::VLD2q16PseudoWB_fixed : return true;
1680 case ARM::VLD2q32PseudoWB_fixed : return true;
1681 case ARM::VLD2DUPd8wb_fixed : return true;
1682 case ARM::VLD2DUPd16wb_fixed : return true;
1683 case ARM::VLD2DUPd32wb_fixed : return true;
1687 static bool isVSTfixed(unsigned Opc)
1690 default: return false;
1691 case ARM::VST1d8wb_fixed : return true;
1692 case ARM::VST1d16wb_fixed : return true;
1693 case ARM::VST1d32wb_fixed : return true;
1694 case ARM::VST1d64wb_fixed : return true;
1695 case ARM::VST1q8wb_fixed : return true;
1696 case ARM::VST1q16wb_fixed : return true;
1697 case ARM::VST1q32wb_fixed : return true;
1698 case ARM::VST1q64wb_fixed : return true;
1699 case ARM::VST1d64TPseudoWB_fixed : return true;
1700 case ARM::VST1d64QPseudoWB_fixed : return true;
1701 case ARM::VST2d8wb_fixed : return true;
1702 case ARM::VST2d16wb_fixed : return true;
1703 case ARM::VST2d32wb_fixed : return true;
1704 case ARM::VST2q8PseudoWB_fixed : return true;
1705 case ARM::VST2q16PseudoWB_fixed : return true;
1706 case ARM::VST2q32PseudoWB_fixed : return true;
1710 // Get the register stride update opcode of a VLD/VST instruction that
1711 // is otherwise equivalent to the given fixed stride updating instruction.
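// Illustrative note (an assumption about the underlying NEON writeback forms,
// not stated here): a "_fixed" opcode such as VLD1d8wb_fixed post-increments
// the base address by the transfer size, while the "_register" form returned
// below takes an extra register operand that supplies the increment.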
1712 static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
1713 assert((isVLDfixed(Opc) || isVSTfixed(Opc))
1714 && "Incorrect fixed stride updating instruction.");
1717 case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
1718 case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
1719 case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
1720 case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
1721 case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
1722 case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
1723 case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
1724 case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
1725 case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
1726 case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
1727 case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
1728 case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;
1730 case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
1731 case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
1732 case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
1733 case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
1734 case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
1735 case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
1736 case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
1737 case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
1738 case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
1739 case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;
1741 case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
1742 case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
1743 case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
1744 case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
1745 case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
1746 case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;
1748 case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
1749 case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
1750 case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
1751 case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
1752 case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
1753 case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;
1755 case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
1756 case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
1757 case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
1759 return Opc; // If not one we handle, return it unchanged.
1762 SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
1763 const uint16_t *DOpcodes,
1764 const uint16_t *QOpcodes0,
1765 const uint16_t *QOpcodes1) {
1766 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
1769 SDValue MemAddr, Align;
1770 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1771 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1774 SDValue Chain = N->getOperand(0);
1775 EVT VT = N->getValueType(0);
1776 bool is64BitVector = VT.is64BitVector();
1777 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1779 unsigned OpcodeIndex;
1780 switch (VT.getSimpleVT().SimpleTy) {
1781 default: llvm_unreachable("unhandled vld type");
1782 // Double-register operations:
1783 case MVT::v8i8: OpcodeIndex = 0; break;
1784 case MVT::v4i16: OpcodeIndex = 1; break;
1786 case MVT::v2i32: OpcodeIndex = 2; break;
1787 case MVT::v1i64: OpcodeIndex = 3; break;
1788 // Quad-register operations:
1789 case MVT::v16i8: OpcodeIndex = 0; break;
1790 case MVT::v8i16: OpcodeIndex = 1; break;
1792 case MVT::v4i32: OpcodeIndex = 2; break;
1794 case MVT::v2i64: OpcodeIndex = 3;
1795 assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
1803 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1806 ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
1808 std::vector<EVT> ResTys;
1809 ResTys.push_back(ResTy);
1811 ResTys.push_back(MVT::i32);
1812 ResTys.push_back(MVT::Other);
1814 SDValue Pred = getAL(CurDAG);
1815 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1817 SmallVector<SDValue, 7> Ops;
1819 // Double registers and VLD1/VLD2 quad registers are directly supported.
1820 if (is64BitVector || NumVecs <= 2) {
1821 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1822 QOpcodes0[OpcodeIndex]);
1823 Ops.push_back(MemAddr);
1824 Ops.push_back(Align);
1826 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1827 // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
1828 // case entirely when the rest are updated to that form, too.
1829 if ((NumVecs <= 2) && !isa<ConstantSDNode>(Inc.getNode()))
1830 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1831 // FIXME: We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
1832 // check for that explicitly too. Horribly hacky, but temporary.
1833 if ((NumVecs > 2 && !isVLDfixed(Opc)) ||
1834 !isa<ConstantSDNode>(Inc.getNode()))
1835 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1837 Ops.push_back(Pred);
1838 Ops.push_back(Reg0);
1839 Ops.push_back(Chain);
1840 VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1841 } else {
1843 // Otherwise, quad registers are loaded with two separate instructions,
1844 // where one loads the even registers and the other loads the odd registers.
1845 EVT AddrTy = MemAddr.getValueType();
1847 // Load the even subregs. This is always an updating load, so that it
1848 // provides the address to the second load for the odd subregs.
1850 SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
1851 const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
1852 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1853 ResTy, AddrTy, MVT::Other, OpsA);
1854 Chain = SDValue(VLdA, 2);
1856 // Load the odd subregs.
1857 Ops.push_back(SDValue(VLdA, 1));
1858 Ops.push_back(Align);
1860 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1861 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1862 "only constant post-increment update allowed for VLD3/4");
1864 Ops.push_back(Reg0);
1866 Ops.push_back(SDValue(VLdA, 0));
1867 Ops.push_back(Pred);
1868 Ops.push_back(Reg0);
1869 Ops.push_back(Chain);
1870 VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
1871 }
1873 // Transfer memoperands.
1874 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1875 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1876 cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
1881 // Extract out the subregisters.
1882 SDValue SuperReg = SDValue(VLd, 0);
1883 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1884 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1885 unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
1886 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1887 ReplaceUses(SDValue(N, Vec),
1888 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
1889 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
1890 if (isUpdating)
1891 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
1892 return nullptr;
1893 }
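/// SelectVST - Select a NEON vstN intrinsic or post-incrementing VSTn node.
/// The source vectors are first packed into a D- or Q-register REG_SEQUENCE
/// so the register allocator assigns consecutive registers; a vst3/vst4 of Q
/// registers is then emitted as an even-register store followed by an
/// odd-register store, as sketched in the body below.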
1895 SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
1896 const uint16_t *DOpcodes,
1897 const uint16_t *QOpcodes0,
1898 const uint16_t *QOpcodes1) {
1899 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1902 SDValue MemAddr, Align;
1903 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1904 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1905 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1906 return nullptr;
1908 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1909 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1911 SDValue Chain = N->getOperand(0);
1912 EVT VT = N->getOperand(Vec0Idx).getValueType();
1913 bool is64BitVector = VT.is64BitVector();
1914 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1916 unsigned OpcodeIndex;
1917 switch (VT.getSimpleVT().SimpleTy) {
1918 default: llvm_unreachable("unhandled vst type");
1919 // Double-register operations:
1920 case MVT::v8i8: OpcodeIndex = 0; break;
1921 case MVT::v4i16: OpcodeIndex = 1; break;
1923 case MVT::v2i32: OpcodeIndex = 2; break;
1924 case MVT::v1i64: OpcodeIndex = 3; break;
1925 // Quad-register operations:
1926 case MVT::v16i8: OpcodeIndex = 0; break;
1927 case MVT::v8i16: OpcodeIndex = 1; break;
1929 case MVT::v4i32: OpcodeIndex = 2; break;
1931 case MVT::v2i64: OpcodeIndex = 3;
1932 assert(NumVecs == 1 && "v2i64 type only supported for VST1");
1936 std::vector<EVT> ResTys;
1938 ResTys.push_back(MVT::i32);
1939 ResTys.push_back(MVT::Other);
1941 SDValue Pred = getAL(CurDAG);
1942 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1943 SmallVector<SDValue, 7> Ops;
1945 // Double registers and VST1/VST2 quad registers are directly supported.
1946 if (is64BitVector || NumVecs <= 2) {
1949 SrcReg = N->getOperand(Vec0Idx);
1950 } else if (is64BitVector) {
1951 // Form a REG_SEQUENCE to force register allocation.
1952 SDValue V0 = N->getOperand(Vec0Idx + 0);
1953 SDValue V1 = N->getOperand(Vec0Idx + 1);
1955 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
1957 SDValue V2 = N->getOperand(Vec0Idx + 2);
1958 // If it's a vst3, form a quad D-register and leave the last part as
1959 // an undef.
1960 SDValue V3 = (NumVecs == 3)
1961 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1962 : N->getOperand(Vec0Idx + 3);
1963 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
1966 // Form a QQ register.
1967 SDValue Q0 = N->getOperand(Vec0Idx);
1968 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1969 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
1972 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1973 QOpcodes0[OpcodeIndex]);
1974 Ops.push_back(MemAddr);
1975 Ops.push_back(Align);
1977 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1978 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1979 // case entirely when the rest are updated to that form, too.
1980 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1981 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1982 // FIXME: We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
1983 // check for that explicitly too. Horribly hacky, but temporary.
1984 if (!isa<ConstantSDNode>(Inc.getNode()))
1986 else if (NumVecs > 2 && !isVSTfixed(Opc))
1987 Ops.push_back(Reg0);
1989 Ops.push_back(SrcReg);
1990 Ops.push_back(Pred);
1991 Ops.push_back(Reg0);
1992 Ops.push_back(Chain);
1993 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1995 // Transfer memoperands.
1996 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
1998 return VSt;
1999 }
2001 // Otherwise, quad registers are stored with two separate instructions,
2002 // where one stores the even registers and the other stores the odd registers.
2004 // Form the QQQQ REG_SEQUENCE.
2005 SDValue V0 = N->getOperand(Vec0Idx + 0);
2006 SDValue V1 = N->getOperand(Vec0Idx + 1);
2007 SDValue V2 = N->getOperand(Vec0Idx + 2);
2008 SDValue V3 = (NumVecs == 3)
2009 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2010 : N->getOperand(Vec0Idx + 3);
2011 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2013 // Store the even D registers. This is always an updating store, so that it
2014 // provides the address to the second store for the odd subregs.
2015 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
2016 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2017 MemAddr.getValueType(),
2019 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
2020 Chain = SDValue(VStA, 1);
2022 // Store the odd D registers.
2023 Ops.push_back(SDValue(VStA, 0));
2024 Ops.push_back(Align);
2026 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2027 assert(isa<ConstantSDNode>(Inc.getNode()) &&
2028 "only constant post-increment update allowed for VST3/4");
2030 Ops.push_back(Reg0);
2032 Ops.push_back(RegSeq);
2033 Ops.push_back(Pred);
2034 Ops.push_back(Reg0);
2035 Ops.push_back(Chain);
2036 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
2038 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
2039 return VStB;
2040 }
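/// SelectVLDSTLane - Select a NEON load-lane or store-lane operation
/// (vldN-lane / vstN-lane). The participating vectors are packed into one
/// super-register, the lane number is passed as an immediate operand, and
/// for loads the individual vectors are recovered afterwards with
/// subregister extracts.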
2042 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
2043 bool isUpdating, unsigned NumVecs,
2044 const uint16_t *DOpcodes,
2045 const uint16_t *QOpcodes) {
2046 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
2049 SDValue MemAddr, Align;
2050 unsigned AddrOpIdx = isUpdating ? 1 : 2;
2051 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2052 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2053 return nullptr;
2055 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2056 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2058 SDValue Chain = N->getOperand(0);
2060 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
2061 EVT VT = N->getOperand(Vec0Idx).getValueType();
2062 bool is64BitVector = VT.is64BitVector();
2064 unsigned Alignment = 0;
2066 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2067 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2068 if (Alignment > NumBytes)
2069 Alignment = NumBytes;
2070 if (Alignment < 8 && Alignment < NumBytes)
2072 // Alignment must be a power of two; make sure of that.
2073 Alignment = (Alignment & -Alignment);
2077 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2079 unsigned OpcodeIndex;
2080 switch (VT.getSimpleVT().SimpleTy) {
2081 default: llvm_unreachable("unhandled vld/vst lane type");
2082 // Double-register operations:
2083 case MVT::v8i8: OpcodeIndex = 0; break;
2084 case MVT::v4i16: OpcodeIndex = 1; break;
2086 case MVT::v2i32: OpcodeIndex = 2; break;
2087 // Quad-register operations:
2088 case MVT::v8i16: OpcodeIndex = 0; break;
2090 case MVT::v4i32: OpcodeIndex = 1; break;
2093 std::vector<EVT> ResTys;
2095 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2098 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
2099 MVT::i64, ResTyElts));
2102 ResTys.push_back(MVT::i32);
2103 ResTys.push_back(MVT::Other);
2105 SDValue Pred = getAL(CurDAG);
2106 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2108 SmallVector<SDValue, 8> Ops;
2109 Ops.push_back(MemAddr);
2110 Ops.push_back(Align);
2112 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2113 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
2117 SDValue V0 = N->getOperand(Vec0Idx + 0);
2118 SDValue V1 = N->getOperand(Vec0Idx + 1);
2121 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2123 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
2125 SDValue V2 = N->getOperand(Vec0Idx + 2);
2126 SDValue V3 = (NumVecs == 3)
2127 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2128 : N->getOperand(Vec0Idx + 3);
2130 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2132 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2134 Ops.push_back(SuperReg);
2135 Ops.push_back(getI32Imm(Lane));
2136 Ops.push_back(Pred);
2137 Ops.push_back(Reg0);
2138 Ops.push_back(Chain);
2140 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2141 QOpcodes[OpcodeIndex]);
2142 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2143 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
2147 // Extract the subregisters.
2148 SuperReg = SDValue(VLdLn, 0);
2149 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
2150 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
2151 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
2152 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2153 ReplaceUses(SDValue(N, Vec),
2154 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2155 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
2156 if (isUpdating)
2157 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
2158 return nullptr;
2159 }
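/// SelectVLDDup - Select a NEON load-and-duplicate operation (vldN-dup),
/// which loads one element per vector and replicates it into every lane.
/// The forms handled here all produce D registers, so the result is always
/// unpacked with dsub_0..dsub_N subregister extracts.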
2161 SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
2163 const uint16_t *Opcodes) {
2164 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2167 SDValue MemAddr, Align;
2168 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
2169 return nullptr;
2171 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2172 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2174 SDValue Chain = N->getOperand(0);
2175 EVT VT = N->getValueType(0);
2177 unsigned Alignment = 0;
2179 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2180 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2181 if (Alignment > NumBytes)
2182 Alignment = NumBytes;
2183 if (Alignment < 8 && Alignment < NumBytes)
2185 // Alignment must be a power of two; make sure of that.
2186 Alignment = (Alignment & -Alignment);
2190 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2192 unsigned OpcodeIndex;
2193 switch (VT.getSimpleVT().SimpleTy) {
2194 default: llvm_unreachable("unhandled vld-dup type");
2195 case MVT::v8i8: OpcodeIndex = 0; break;
2196 case MVT::v4i16: OpcodeIndex = 1; break;
2198 case MVT::v2i32: OpcodeIndex = 2; break;
2201 SDValue Pred = getAL(CurDAG);
2202 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2204 unsigned Opc = Opcodes[OpcodeIndex];
2205 SmallVector<SDValue, 6> Ops;
2206 Ops.push_back(MemAddr);
2207 Ops.push_back(Align);
2208 if (isUpdating) {
2209 // fixed-stride update instructions don't have an explicit writeback
2210 // operand. It's implicit in the opcode itself.
2211 SDValue Inc = N->getOperand(2);
2212 if (!isa<ConstantSDNode>(Inc.getNode()))
2213 Ops.push_back(Inc);
2214 // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
2215 else if (NumVecs > 2)
2216 Ops.push_back(Reg0);
2217 }
2218 Ops.push_back(Pred);
2219 Ops.push_back(Reg0);
2220 Ops.push_back(Chain);
2222 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2223 std::vector<EVT> ResTys;
2224 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
2226 ResTys.push_back(MVT::i32);
2227 ResTys.push_back(MVT::Other);
2228 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2229 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
2230 SuperReg = SDValue(VLdDup, 0);
2232 // Extract the subregisters.
2233 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2234 unsigned SubIdx = ARM::dsub_0;
2235 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2236 ReplaceUses(SDValue(N, Vec),
2237 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
2238 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
2239 if (isUpdating)
2240 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
2241 return nullptr;
2242 }
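/// SelectVTBL - Select a NEON table lookup (vtbl) or table extension (vtbx)
/// intrinsic. The table registers are packed into a D-register sequence so
/// that they land in consecutive registers, which the VTBL/VTBX instructions
/// require.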
2244 SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
2246 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2248 EVT VT = N->getValueType(0);
2249 unsigned FirstTblReg = IsExt ? 2 : 1;
2251 // Form a REG_SEQUENCE to force register allocation.
2253 SDValue V0 = N->getOperand(FirstTblReg + 0);
2254 SDValue V1 = N->getOperand(FirstTblReg + 1);
2256 RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
2258 SDValue V2 = N->getOperand(FirstTblReg + 2);
2259 // If it's a vtbl3, form a quad D-register and leave the last part as
2261 SDValue V3 = (NumVecs == 3)
2262 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2263 : N->getOperand(FirstTblReg + 3);
2264 RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2267 SmallVector<SDValue, 6> Ops;
2269 Ops.push_back(N->getOperand(1));
2270 Ops.push_back(RegSeq);
2271 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2272 Ops.push_back(getAL(CurDAG)); // predicate
2273 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
2274 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2275 }
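/// SelectV6T2BitfieldExtractOp - Try to match shift-and-mask and
/// sign_extend_inreg patterns to the ARMv6T2 bitfield extract instructions
/// (SBFX/UBFX and their Thumb-2 counterparts). A rough example of the kind
/// of input that matches (illustrative only):
///   %t = lshr i32 %x, 7
///   %r = and i32 %t, 255
/// which can be selected to "ubfx r0, r1, #7, #8", extracting 8 bits
/// starting at bit 7.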
2277 SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
2279 if (!Subtarget->hasV6T2Ops())
2282 unsigned Opc = isSigned
2283 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2284 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2286 // For unsigned extracts, check for a shift right and mask
2287 unsigned And_imm = 0;
2288 if (N->getOpcode() == ISD::AND) {
2289 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2291 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
2292 if (And_imm & (And_imm + 1))
2295 unsigned Srl_imm = 0;
2296 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
2298 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2300 // Note: The width operand is encoded as width-1.
2301 unsigned Width = countTrailingOnes(And_imm) - 1;
2302 unsigned LSB = Srl_imm;
2304 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2306 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
2307 // It's cheaper to use a right shift to extract the top bits.
2308 if (Subtarget->isThumb()) {
2309 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
2310 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2311 CurDAG->getTargetConstant(LSB, MVT::i32),
2312 getAL(CurDAG), Reg0, Reg0 };
2313 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2316 // ARM models shift instructions as MOVsi with shifter operand.
2317 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
2319 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB),
2321 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
2322 getAL(CurDAG), Reg0, Reg0 };
2323 return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
2326 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2327 CurDAG->getTargetConstant(LSB, MVT::i32),
2328 CurDAG->getTargetConstant(Width, MVT::i32),
2329 getAL(CurDAG), Reg0 };
2330 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2336 // Otherwise, we're looking for a shift of a shift
2337 unsigned Shl_imm = 0;
2338 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2339 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2340 unsigned Srl_imm = 0;
2341 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2342 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2343 // Note: The width operand is encoded as width-1.
2344 unsigned Width = 32 - Srl_imm - 1;
2345 int LSB = Srl_imm - Shl_imm;
2348 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2349 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2350 CurDAG->getTargetConstant(LSB, MVT::i32),
2351 CurDAG->getTargetConstant(Width, MVT::i32),
2352 getAL(CurDAG), Reg0 };
2353 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2357 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
2358 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2360 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, LSB) &&
2361 !isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRA, LSB))
2364 if (LSB + Width > 32)
2367 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2368 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2369 CurDAG->getTargetConstant(LSB, MVT::i32),
2370 CurDAG->getTargetConstant(Width - 1, MVT::i32),
2371 getAL(CurDAG), Reg0 };
2372 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2378 /// Target-specific DAG combining for ISD::XOR.
2379 /// Target-independent combining lowers SELECT_CC nodes of the form
2380 /// select_cc setg[ge] X, 0, X, -X
2381 /// select_cc setgt X, -1, X, -X
2382 /// select_cc setl[te] X, 0, -X, X
2383 /// select_cc setlt X, 1, -X, X
2384 /// which represent Integer ABS into:
2385 /// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2386 /// ARM instruction selection detects the latter and matches it to an
2387 /// ARM::ABS or ARM::t2ABS machine node.
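/// For example (illustrative): for an i32 value X the combine produces
///   Y = sra X, 31
///   R = xor (add X, Y), Y
/// and this routine folds that xor back into a single ABS machine node.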
2388 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N) {
2389 SDValue XORSrc0 = N->getOperand(0);
2390 SDValue XORSrc1 = N->getOperand(1);
2391 EVT VT = N->getValueType(0);
2393 if (Subtarget->isThumb1Only())
2396 if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
2399 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2400 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2401 SDValue SRASrc0 = XORSrc1.getOperand(0);
2402 SDValue SRASrc1 = XORSrc1.getOperand(1);
2403 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2404 EVT XType = SRASrc0.getValueType();
2405 unsigned Size = XType.getSizeInBits() - 1;
2407 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
2408 XType.isInteger() && SRAConstant != nullptr &&
2409 Size == SRAConstant->getZExtValue()) {
2410 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
2411 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
2417 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2418 // The only time a CONCAT_VECTORS operation can have legal types is when
2419 // two 64-bit vectors are concatenated to a 128-bit vector.
2420 EVT VT = N->getValueType(0);
2421 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2422 llvm_unreachable("unexpected CONCAT_VECTORS");
2423 return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
2424 }
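/// Select - Main instruction selection hook. The ARM- and NEON-specific
/// opcodes are handled explicitly below; everything else falls through to
/// the TableGen-generated matcher via SelectCode.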
2426 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2429 if (N->isMachineOpcode()) {
2431 return nullptr; // Already selected.
2434 switch (N->getOpcode()) {
2436 case ISD::INLINEASM: {
2437 SDNode *ResNode = SelectInlineAsm(N);
2443 // Select special operations if XOR node forms integer ABS pattern
2444 SDNode *ResNode = SelectABSOp(N);
2447 // Other cases are autogenerated.
2450 case ISD::Constant: {
2451 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2453 if (Subtarget->useMovt(*MF))
2454 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2455 // be done with MOV + MOVT, at worst.
2458 if (Subtarget->isThumb()) {
2459 UseCP = (Val > 255 && // MOV
2460 ~Val > 255 && // MOV + MVN
2461 !ARM_AM::isThumbImmShiftedVal(Val) && // MOV + LSL
2462 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2464 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
2465 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2466 !ARM_AM::isSOImmTwoPartVal(Val) && // two instrs.
2467 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2471 SDValue CPIdx = CurDAG->getTargetConstantPool(
2472 ConstantInt::get(Type::getInt32Ty(*CurDAG->getContext()), Val),
2473 TLI->getPointerTy());
2476 if (Subtarget->isThumb()) {
2477 SDValue Pred = getAL(CurDAG);
2478 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2479 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2480 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2485 CurDAG->getTargetConstant(0, MVT::i32),
2487 CurDAG->getRegister(0, MVT::i32),
2488 CurDAG->getEntryNode()
2490 ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2493 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2497 // Other cases are autogenerated.
2500 case ISD::FrameIndex: {
2501 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2502 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2503 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
2504 if (Subtarget->isThumb1Only()) {
2505 return CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
2506 CurDAG->getTargetConstant(0, MVT::i32));
2508 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2509 ARM::t2ADDri : ARM::ADDri);
2510 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2511 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2512 CurDAG->getRegister(0, MVT::i32) };
2513 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2517 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2520 case ISD::SIGN_EXTEND_INREG:
2522 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2526 if (Subtarget->isThumb1Only())
2528 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2529 unsigned RHSV = C->getZExtValue();
2531 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2532 unsigned ShImm = Log2_32(RHSV-1);
2535 SDValue V = N->getOperand(0);
2536 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2537 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2538 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2539 if (Subtarget->isThumb()) {
2540 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2541 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
2543 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2544 return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
2547 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2548 unsigned ShImm = Log2_32(RHSV+1);
2551 SDValue V = N->getOperand(0);
2552 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2553 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2554 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2555 if (Subtarget->isThumb()) {
2556 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2557 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
2559 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2560 return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
2566 // Check for unsigned bitfield extract
2567 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2570 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
2571 // of c1 are 0xffff, and lower 16 bits of c2 are 0. That is, the top 16-bits
2572 // are entirely contributed by c2 and lower 16-bits are entirely contributed
2573 // by x. That's equal to (or (and x, 0xffff), (and c2, 0xffff0000)).
2574 // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
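// For instance (a made-up example): with c1 = 0x1234ffff and c2 = 0x12340000,
// (and (or x, c2), c1) == (or (and x, 0xffff), 0x12340000), which is just
// "movt x, #0x1234" applied on top of the low half of x.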
2575 EVT VT = N->getValueType(0);
2578 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2580 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2583 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2584 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2587 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2588 SDValue N2 = N0.getOperand(1);
2589 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2592 unsigned N1CVal = N1C->getZExtValue();
2593 unsigned N2CVal = N2C->getZExtValue();
2594 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2595 (N1CVal & 0xffffU) == 0xffffU &&
2596 (N2CVal & 0xffffU) == 0x0U) {
2597 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2599 SDValue Ops[] = { N0.getOperand(0), Imm16,
2600 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2601 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2606 case ARMISD::VMOVRRD:
2607 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2608 N->getOperand(0), getAL(CurDAG),
2609 CurDAG->getRegister(0, MVT::i32));
2610 case ISD::UMUL_LOHI: {
2611 if (Subtarget->isThumb1Only())
2613 if (Subtarget->isThumb()) {
2614 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2615 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2616 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
2618 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2619 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2620 CurDAG->getRegister(0, MVT::i32) };
2621 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2622 ARM::UMULL : ARM::UMULLv5,
2623 dl, MVT::i32, MVT::i32, Ops);
2626 case ISD::SMUL_LOHI: {
2627 if (Subtarget->isThumb1Only())
2629 if (Subtarget->isThumb()) {
2630 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2631 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2632 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
2634 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2635 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2636 CurDAG->getRegister(0, MVT::i32) };
2637 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2638 ARM::SMULL : ARM::SMULLv5,
2639 dl, MVT::i32, MVT::i32, Ops);
2642 case ARMISD::UMLAL:{
2643 if (Subtarget->isThumb()) {
2644 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2645 N->getOperand(3), getAL(CurDAG),
2646 CurDAG->getRegister(0, MVT::i32)};
2647 return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
2649 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2650 N->getOperand(3), getAL(CurDAG),
2651 CurDAG->getRegister(0, MVT::i32),
2652 CurDAG->getRegister(0, MVT::i32) };
2653 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2654 ARM::UMLAL : ARM::UMLALv5,
2655 dl, MVT::i32, MVT::i32, Ops);
2658 case ARMISD::SMLAL:{
2659 if (Subtarget->isThumb()) {
2660 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2661 N->getOperand(3), getAL(CurDAG),
2662 CurDAG->getRegister(0, MVT::i32)};
2663 return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
2665 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2666 N->getOperand(3), getAL(CurDAG),
2667 CurDAG->getRegister(0, MVT::i32),
2668 CurDAG->getRegister(0, MVT::i32) };
2669 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2670 ARM::SMLAL : ARM::SMLALv5,
2671 dl, MVT::i32, MVT::i32, Ops);
2675 SDNode *ResNode = nullptr;
2676 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2677 ResNode = SelectT2IndexedLoad(N);
2679 ResNode = SelectARMIndexedLoad(N);
2682 // Other cases are autogenerated.
2685 case ARMISD::BRCOND: {
2686 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2687 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2688 // Pattern complexity = 6 cost = 1 size = 0
2690 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2691 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2692 // Pattern complexity = 6 cost = 1 size = 0
2694 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2695 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2696 // Pattern complexity = 6 cost = 1 size = 0
2698 unsigned Opc = Subtarget->isThumb() ?
2699 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2700 SDValue Chain = N->getOperand(0);
2701 SDValue N1 = N->getOperand(1);
2702 SDValue N2 = N->getOperand(2);
2703 SDValue N3 = N->getOperand(3);
2704 SDValue InFlag = N->getOperand(4);
2705 assert(N1.getOpcode() == ISD::BasicBlock);
2706 assert(N2.getOpcode() == ISD::Constant);
2707 assert(N3.getOpcode() == ISD::Register);
2709 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2710 cast<ConstantSDNode>(N2)->getZExtValue()),
2712 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2713 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2715 Chain = SDValue(ResNode, 0);
2716 if (N->getNumValues() == 2) {
2717 InFlag = SDValue(ResNode, 1);
2718 ReplaceUses(SDValue(N, 1), InFlag);
2720 ReplaceUses(SDValue(N, 0),
2721 SDValue(Chain.getNode(), Chain.getResNo()));
2724 case ARMISD::VZIP: {
2726 EVT VT = N->getValueType(0);
2727 switch (VT.getSimpleVT().SimpleTy) {
2728 default: return nullptr;
2729 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2730 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2732 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2733 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2734 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2735 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2737 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2739 SDValue Pred = getAL(CurDAG);
2740 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2741 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2742 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2744 case ARMISD::VUZP: {
2746 EVT VT = N->getValueType(0);
2747 switch (VT.getSimpleVT().SimpleTy) {
2748 default: return nullptr;
2749 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2750 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2752 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2753 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2754 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2755 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2757 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2759 SDValue Pred = getAL(CurDAG);
2760 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2761 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2762 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2764 case ARMISD::VTRN: {
2766 EVT VT = N->getValueType(0);
2767 switch (VT.getSimpleVT().SimpleTy) {
2768 default: return nullptr;
2769 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2770 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2772 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2773 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2774 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2776 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2778 SDValue Pred = getAL(CurDAG);
2779 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2780 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2781 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2783 case ARMISD::BUILD_VECTOR: {
2784 EVT VecVT = N->getValueType(0);
2785 EVT EltVT = VecVT.getVectorElementType();
2786 unsigned NumElts = VecVT.getVectorNumElements();
2787 if (EltVT == MVT::f64) {
2788 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2789 return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2791 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2793 return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2794 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2795 return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
2796 N->getOperand(2), N->getOperand(3));
2799 case ARMISD::VLD2DUP: {
2800 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
2802 return SelectVLDDup(N, false, 2, Opcodes);
2805 case ARMISD::VLD3DUP: {
2806 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2807 ARM::VLD3DUPd16Pseudo,
2808 ARM::VLD3DUPd32Pseudo };
2809 return SelectVLDDup(N, false, 3, Opcodes);
2812 case ARMISD::VLD4DUP: {
2813 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2814 ARM::VLD4DUPd16Pseudo,
2815 ARM::VLD4DUPd32Pseudo };
2816 return SelectVLDDup(N, false, 4, Opcodes);
2819 case ARMISD::VLD2DUP_UPD: {
2820 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2821 ARM::VLD2DUPd16wb_fixed,
2822 ARM::VLD2DUPd32wb_fixed };
2823 return SelectVLDDup(N, true, 2, Opcodes);
2826 case ARMISD::VLD3DUP_UPD: {
2827 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2828 ARM::VLD3DUPd16Pseudo_UPD,
2829 ARM::VLD3DUPd32Pseudo_UPD };
2830 return SelectVLDDup(N, true, 3, Opcodes);
2833 case ARMISD::VLD4DUP_UPD: {
2834 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2835 ARM::VLD4DUPd16Pseudo_UPD,
2836 ARM::VLD4DUPd32Pseudo_UPD };
2837 return SelectVLDDup(N, true, 4, Opcodes);
2840 case ARMISD::VLD1_UPD: {
2841 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2842 ARM::VLD1d16wb_fixed,
2843 ARM::VLD1d32wb_fixed,
2844 ARM::VLD1d64wb_fixed };
2845 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2846 ARM::VLD1q16wb_fixed,
2847 ARM::VLD1q32wb_fixed,
2848 ARM::VLD1q64wb_fixed };
2849 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
2852 case ARMISD::VLD2_UPD: {
2853 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2854 ARM::VLD2d16wb_fixed,
2855 ARM::VLD2d32wb_fixed,
2856 ARM::VLD1q64wb_fixed};
2857 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2858 ARM::VLD2q16PseudoWB_fixed,
2859 ARM::VLD2q32PseudoWB_fixed };
2860 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
2863 case ARMISD::VLD3_UPD: {
2864 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2865 ARM::VLD3d16Pseudo_UPD,
2866 ARM::VLD3d32Pseudo_UPD,
2867 ARM::VLD1d64TPseudoWB_fixed};
2868 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2869 ARM::VLD3q16Pseudo_UPD,
2870 ARM::VLD3q32Pseudo_UPD };
2871 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2872 ARM::VLD3q16oddPseudo_UPD,
2873 ARM::VLD3q32oddPseudo_UPD };
2874 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2877 case ARMISD::VLD4_UPD: {
2878 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
2879 ARM::VLD4d16Pseudo_UPD,
2880 ARM::VLD4d32Pseudo_UPD,
2881 ARM::VLD1d64QPseudoWB_fixed};
2882 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2883 ARM::VLD4q16Pseudo_UPD,
2884 ARM::VLD4q32Pseudo_UPD };
2885 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2886 ARM::VLD4q16oddPseudo_UPD,
2887 ARM::VLD4q32oddPseudo_UPD };
2888 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2891 case ARMISD::VLD2LN_UPD: {
2892 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
2893 ARM::VLD2LNd16Pseudo_UPD,
2894 ARM::VLD2LNd32Pseudo_UPD };
2895 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2896 ARM::VLD2LNq32Pseudo_UPD };
2897 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2900 case ARMISD::VLD3LN_UPD: {
2901 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
2902 ARM::VLD3LNd16Pseudo_UPD,
2903 ARM::VLD3LNd32Pseudo_UPD };
2904 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
2905 ARM::VLD3LNq32Pseudo_UPD };
2906 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
2909 case ARMISD::VLD4LN_UPD: {
2910 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
2911 ARM::VLD4LNd16Pseudo_UPD,
2912 ARM::VLD4LNd32Pseudo_UPD };
2913 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
2914 ARM::VLD4LNq32Pseudo_UPD };
2915 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
2918 case ARMISD::VST1_UPD: {
2919 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
2920 ARM::VST1d16wb_fixed,
2921 ARM::VST1d32wb_fixed,
2922 ARM::VST1d64wb_fixed };
2923 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
2924 ARM::VST1q16wb_fixed,
2925 ARM::VST1q32wb_fixed,
2926 ARM::VST1q64wb_fixed };
2927 return SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
2930 case ARMISD::VST2_UPD: {
2931 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
2932 ARM::VST2d16wb_fixed,
2933 ARM::VST2d32wb_fixed,
2934 ARM::VST1q64wb_fixed};
2935 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
2936 ARM::VST2q16PseudoWB_fixed,
2937 ARM::VST2q32PseudoWB_fixed };
2938 return SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
2941 case ARMISD::VST3_UPD: {
2942 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
2943 ARM::VST3d16Pseudo_UPD,
2944 ARM::VST3d32Pseudo_UPD,
2945 ARM::VST1d64TPseudoWB_fixed};
2946 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
2947 ARM::VST3q16Pseudo_UPD,
2948 ARM::VST3q32Pseudo_UPD };
2949 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
2950 ARM::VST3q16oddPseudo_UPD,
2951 ARM::VST3q32oddPseudo_UPD };
2952 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2955 case ARMISD::VST4_UPD: {
2956 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
2957 ARM::VST4d16Pseudo_UPD,
2958 ARM::VST4d32Pseudo_UPD,
2959 ARM::VST1d64QPseudoWB_fixed};
2960 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
2961 ARM::VST4q16Pseudo_UPD,
2962 ARM::VST4q32Pseudo_UPD };
2963 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
2964 ARM::VST4q16oddPseudo_UPD,
2965 ARM::VST4q32oddPseudo_UPD };
2966 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2969 case ARMISD::VST2LN_UPD: {
2970 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
2971 ARM::VST2LNd16Pseudo_UPD,
2972 ARM::VST2LNd32Pseudo_UPD };
2973 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
2974 ARM::VST2LNq32Pseudo_UPD };
2975 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
2978 case ARMISD::VST3LN_UPD: {
2979 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
2980 ARM::VST3LNd16Pseudo_UPD,
2981 ARM::VST3LNd32Pseudo_UPD };
2982 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
2983 ARM::VST3LNq32Pseudo_UPD };
2984 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
2987 case ARMISD::VST4LN_UPD: {
2988 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
2989 ARM::VST4LNd16Pseudo_UPD,
2990 ARM::VST4LNd32Pseudo_UPD };
2991 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
2992 ARM::VST4LNq32Pseudo_UPD };
2993 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
2996 case ISD::INTRINSIC_VOID:
2997 case ISD::INTRINSIC_W_CHAIN: {
2998 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
3003 case Intrinsic::arm_ldaexd:
3004 case Intrinsic::arm_ldrexd: {
3006 SDValue Chain = N->getOperand(0);
3007 SDValue MemAddr = N->getOperand(2);
3008 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3010 bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
3011 unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
3012 : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
3014 // arm_ldrexd returns an i64 value in {i32, i32}
3015 std::vector<EVT> ResTys;
3017 ResTys.push_back(MVT::i32);
3018 ResTys.push_back(MVT::i32);
3020 ResTys.push_back(MVT::Untyped);
3021 ResTys.push_back(MVT::Other);
3023 // Place arguments in the right order.
3024 SmallVector<SDValue, 7> Ops;
3025 Ops.push_back(MemAddr);
3026 Ops.push_back(getAL(CurDAG));
3027 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3028 Ops.push_back(Chain);
3029 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3030 // Transfer memoperands.
3031 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3032 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3033 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
3036 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
3037 if (!SDValue(N, 0).use_empty()) {
3040 Result = SDValue(Ld, 0);
3042 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
3043 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3044 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3045 Result = SDValue(ResNode,0);
3047 ReplaceUses(SDValue(N, 0), Result);
3049 if (!SDValue(N, 1).use_empty()) {
3052 Result = SDValue(Ld, 1);
3054 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
3055 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3056 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3057 Result = SDValue(ResNode,0);
3059 ReplaceUses(SDValue(N, 1), Result);
3061 ReplaceUses(SDValue(N, 2), OutChain);
3064 case Intrinsic::arm_stlexd:
3065 case Intrinsic::arm_strexd: {
3067 SDValue Chain = N->getOperand(0);
3068 SDValue Val0 = N->getOperand(2);
3069 SDValue Val1 = N->getOperand(3);
3070 SDValue MemAddr = N->getOperand(4);
3072 // Store exclusive double returns an i32 value which is the status
3073 // of the issued store.
3074 EVT ResTys[] = { MVT::i32, MVT::Other };
3076 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3077 // Place arguments in the right order.
3078 SmallVector<SDValue, 7> Ops;
3080 Ops.push_back(Val0);
3081 Ops.push_back(Val1);
3083 // arm_strexd uses GPRPair.
3084 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
3085 Ops.push_back(MemAddr);
3086 Ops.push_back(getAL(CurDAG));
3087 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3088 Ops.push_back(Chain);
3090 bool IsRelease = IntNo == Intrinsic::arm_stlexd;
3091 unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
3092 : (IsRelease ? ARM::STLEXD : ARM::STREXD);
3094 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3095 // Transfer memoperands.
3096 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3097 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3098 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
3103 case Intrinsic::arm_neon_vld1: {
3104 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3105 ARM::VLD1d32, ARM::VLD1d64 };
3106 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3107 ARM::VLD1q32, ARM::VLD1q64};
3108 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
3111 case Intrinsic::arm_neon_vld2: {
3112 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3113 ARM::VLD2d32, ARM::VLD1q64 };
3114 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3115 ARM::VLD2q32Pseudo };
3116 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
3119 case Intrinsic::arm_neon_vld3: {
3120 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
3123 ARM::VLD1d64TPseudo };
3124 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3125 ARM::VLD3q16Pseudo_UPD,
3126 ARM::VLD3q32Pseudo_UPD };
3127 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3128 ARM::VLD3q16oddPseudo,
3129 ARM::VLD3q32oddPseudo };
3130 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3133 case Intrinsic::arm_neon_vld4: {
3134 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
3137 ARM::VLD1d64QPseudo };
3138 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3139 ARM::VLD4q16Pseudo_UPD,
3140 ARM::VLD4q32Pseudo_UPD };
3141 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3142 ARM::VLD4q16oddPseudo,
3143 ARM::VLD4q32oddPseudo };
3144 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3147 case Intrinsic::arm_neon_vld2lane: {
3148 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3149 ARM::VLD2LNd16Pseudo,
3150 ARM::VLD2LNd32Pseudo };
3151 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3152 ARM::VLD2LNq32Pseudo };
3153 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3156 case Intrinsic::arm_neon_vld3lane: {
3157 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3158 ARM::VLD3LNd16Pseudo,
3159 ARM::VLD3LNd32Pseudo };
3160 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3161 ARM::VLD3LNq32Pseudo };
3162 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3165 case Intrinsic::arm_neon_vld4lane: {
3166 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3167 ARM::VLD4LNd16Pseudo,
3168 ARM::VLD4LNd32Pseudo };
3169 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3170 ARM::VLD4LNq32Pseudo };
3171 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3174 case Intrinsic::arm_neon_vst1: {
3175 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3176 ARM::VST1d32, ARM::VST1d64 };
3177 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3178 ARM::VST1q32, ARM::VST1q64 };
3179 return SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
3182 case Intrinsic::arm_neon_vst2: {
3183 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3184 ARM::VST2d32, ARM::VST1q64 };
3185 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
3186 ARM::VST2q32Pseudo };
3187 return SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
3190 case Intrinsic::arm_neon_vst3: {
3191 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
3194 ARM::VST1d64TPseudo };
3195 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3196 ARM::VST3q16Pseudo_UPD,
3197 ARM::VST3q32Pseudo_UPD };
3198 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3199 ARM::VST3q16oddPseudo,
3200 ARM::VST3q32oddPseudo };
3201 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3204 case Intrinsic::arm_neon_vst4: {
3205 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
3208 ARM::VST1d64QPseudo };
3209 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3210 ARM::VST4q16Pseudo_UPD,
3211 ARM::VST4q32Pseudo_UPD };
3212 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3213 ARM::VST4q16oddPseudo,
3214 ARM::VST4q32oddPseudo };
3215 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3218 case Intrinsic::arm_neon_vst2lane: {
3219 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3220 ARM::VST2LNd16Pseudo,
3221 ARM::VST2LNd32Pseudo };
3222 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3223 ARM::VST2LNq32Pseudo };
3224 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3227 case Intrinsic::arm_neon_vst3lane: {
3228 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3229 ARM::VST3LNd16Pseudo,
3230 ARM::VST3LNd32Pseudo };
3231 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3232 ARM::VST3LNq32Pseudo };
3233 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3236 case Intrinsic::arm_neon_vst4lane: {
3237 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3238 ARM::VST4LNd16Pseudo,
3239 ARM::VST4LNd32Pseudo };
3240 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3241 ARM::VST4LNq32Pseudo };
3242 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
3248 case ISD::INTRINSIC_WO_CHAIN: {
3249 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3254 case Intrinsic::arm_neon_vtbl2:
3255 return SelectVTBL(N, false, 2, ARM::VTBL2);
3256 case Intrinsic::arm_neon_vtbl3:
3257 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3258 case Intrinsic::arm_neon_vtbl4:
3259 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3261 case Intrinsic::arm_neon_vtbx2:
3262 return SelectVTBL(N, true, 2, ARM::VTBX2);
3263 case Intrinsic::arm_neon_vtbx3:
3264 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3265 case Intrinsic::arm_neon_vtbx4:
3266 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
3271 case ARMISD::VTBL1: {
3273 EVT VT = N->getValueType(0);
3274 SmallVector<SDValue, 6> Ops;
3276 Ops.push_back(N->getOperand(0));
3277 Ops.push_back(N->getOperand(1));
3278 Ops.push_back(getAL(CurDAG)); // Predicate
3279 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3280 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
3282 case ARMISD::VTBL2: {
3284 EVT VT = N->getValueType(0);
3286 // Form a REG_SEQUENCE to force register allocation.
3287 SDValue V0 = N->getOperand(0);
3288 SDValue V1 = N->getOperand(1);
3289 SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
3291 SmallVector<SDValue, 6> Ops;
3292 Ops.push_back(RegSeq);
3293 Ops.push_back(N->getOperand(2));
3294 Ops.push_back(getAL(CurDAG)); // Predicate
3295 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3296 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
3299 case ISD::CONCAT_VECTORS:
3300 return SelectConcatVector(N);
3303 return SelectCode(N);
3306 SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
3307 std::vector<SDValue> AsmNodeOperands;
3308 unsigned Flag, Kind;
3309 bool Changed = false;
3310 unsigned NumOps = N->getNumOperands();
3312 // Normally, i64 data is bound to two arbitrary GPRs for the "%r" constraint.
3313 // However, some instructions (e.g. ldrexd/strexd in ARM mode) require
3314 // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
3315 // respectively. Since there is no constraint to explicitly specify a
3316 // reg pair, we use GPRPair reg class for "%r" for 64-bit data. For Thumb,
3317 // the 64-bit data may be referred to by H, Q, R modifiers, so we still pack
3318 // them into a GPRPair.
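// For instance (illustrative): an inline asm such as
//   asm("ldrexd %0, %H0, [%1]" : "=&r"(Val64) : "r"(Ptr));
// models its single 64-bit "=&r" output as two i32 register defs; the loop
// below rewrites them into one GPRPair def and then copies the gsub_0 and
// gsub_1 halves back out to the original i32 virtual registers.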
3321 SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
3322 : SDValue(nullptr,0);
3324 SmallVector<bool, 8> OpChanged;
3325 // Glue node will be appended late.
3326 for(unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
3327 SDValue op = N->getOperand(i);
3328 AsmNodeOperands.push_back(op);
3330 if (i < InlineAsm::Op_FirstOperand)
3333 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
3334 Flag = C->getZExtValue();
3335 Kind = InlineAsm::getKind(Flag);
3340 // Immediate operands to inline asm in the SelectionDAG are modeled with
3341 // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
3342 // the second is a constant with the value of the immediate. If we get here
3343 // and we have a Kind_Imm, skip the next operand, and continue.
3344 if (Kind == InlineAsm::Kind_Imm) {
3345 SDValue op = N->getOperand(++i);
3346 AsmNodeOperands.push_back(op);
3350 unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
3352 OpChanged.push_back(false);
3354 unsigned DefIdx = 0;
3355 bool IsTiedToChangedOp = false;
3356 // If it's a use that is tied with a previous def, it has no
3357 // reg class constraint.
3358 if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
3359 IsTiedToChangedOp = OpChanged[DefIdx];
3361 if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
3362 && Kind != InlineAsm::Kind_RegDefEarlyClobber)
3366 bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
3367 if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
3371 assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
3372 SDValue V0 = N->getOperand(i+1);
3373 SDValue V1 = N->getOperand(i+2);
3374 unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
3375 unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
3377 MachineRegisterInfo &MRI = MF->getRegInfo();
3379 if (Kind == InlineAsm::Kind_RegDef ||
3380 Kind == InlineAsm::Kind_RegDefEarlyClobber) {
3381 // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
3382 // the original GPRs.
3384 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3385 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3386 SDValue Chain = SDValue(N,0);
3388 SDNode *GU = N->getGluedUser();
3389 SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
3392 // Extract values from a GPRPair reg and copy to the original GPR reg.
3393 SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
3395 SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
3397 SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
3398 RegCopy.getValue(1));
3399 SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));
3401 // Update the original glue user.
3402 std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
3403 Ops.push_back(T1.getValue(1));
3404 CurDAG->UpdateNodeOperands(GU, Ops);
3407 // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
3408 // GPRPair and then pass the GPRPair to the inline asm.
3409 SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];
3411 // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
3412 SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
3414 SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
3416 SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);
3418 // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
3419 // i32 VRs of inline asm with it.
3420 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3421 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3422 Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));
3424 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
3425 Glue = Chain.getValue(1);
3430 if(PairedReg.getNode()) {
3431 OpChanged[OpChanged.size() - 1] = true;
3432 Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/);
3433 if (IsTiedToChangedOp)
3434 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
3436 Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
3437 // Replace the current flag.
3438 AsmNodeOperands[AsmNodeOperands.size() -1] = CurDAG->getTargetConstant(
3440 // Add the new register node and skip the original two GPRs.
3441 AsmNodeOperands.push_back(PairedReg);
3442 // Skip the next two GPRs.
3448 AsmNodeOperands.push_back(Glue);
3452 SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
3453 CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
3455 return New.getNode();
3459 bool ARMDAGToDAGISel::
3460 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
3461 std::vector<SDValue> &OutOps) {
3462 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
3463 // Require the address to be in a register. That is safe for all ARM
3464 // variants and it is hard to do anything much smarter without knowing
3465 // how the operand is used.
3466 OutOps.push_back(Op);
3467 return false;
3468 }
3470 /// createARMISelDag - This pass converts a legalized DAG into a
3471 /// ARM-specific DAG, ready for instruction scheduling.
3473 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3474 CodeGenOpt::Level OptLevel) {
3475 return new ARMDAGToDAGISel(TM, OptLevel);
3476 }