1 //===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the ARM target.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "arm-isel"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMTargetMachine.h"
18 #include "MCTargetDesc/ARMAddressingModes.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/CodeGen/SelectionDAGISel.h"
25 #include "llvm/IR/CallingConv.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DerivedTypes.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/IR/Intrinsics.h"
30 #include "llvm/IR/LLVMContext.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Compiler.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Target/TargetLowering.h"
36 #include "llvm/Target/TargetOptions.h"
41 DisableShifterOp("disable-shifter-op", cl::Hidden,
42 cl::desc("Disable isel of shifter-op"),
46 CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
47 cl::desc("Check fp vmla / vmls hazard at isel time"),
50 //===--------------------------------------------------------------------===//
51 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
52 /// instructions for SelectionDAG operations.
57 AM2_BASE, // Simple AM2 (+-imm12)
58 AM2_SHOP // Shifter-op AM2
61 class ARMDAGToDAGISel : public SelectionDAGISel {
62 ARMBaseTargetMachine &TM;
64 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
65 /// make the right decision when generating code for different targets.
66 const ARMSubtarget *Subtarget;
69 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
70 CodeGenOpt::Level OptLevel)
71 : SelectionDAGISel(tm, OptLevel), TM(tm),
72 Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
75 const char *getPassName() const override {
76 return "ARM Instruction Selection";
79 void PreprocessISelDAG() override;
/// getI32Imm - Return a target constant of type i32 with the specified value.
83 inline SDValue getI32Imm(unsigned Imm) {
84 return CurDAG->getTargetConstant(Imm, MVT::i32);
87 SDNode *Select(SDNode *N) override;
90 bool hasNoVMLxHazardUse(SDNode *N) const;
91 bool isShifterOpProfitable(const SDValue &Shift,
92 ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
93 bool SelectRegShifterOperand(SDValue N, SDValue &A,
94 SDValue &B, SDValue &C,
95 bool CheckProfitability = true);
96 bool SelectImmShifterOperand(SDValue N, SDValue &A,
97 SDValue &B, bool CheckProfitability = true);
98 bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
99 SDValue &B, SDValue &C) {
100 // Don't apply the profitability check
101 return SelectRegShifterOperand(N, A, B, C, false);
103 bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
105 // Don't apply the profitability check
106 return SelectImmShifterOperand(N, A, B, false);
109 bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
110 bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);
112 AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
113 SDValue &Offset, SDValue &Opc);
114 bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
116 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
119 bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
121 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
124 bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
126 SelectAddrMode2Worker(N, Base, Offset, Opc);
127 // return SelectAddrMode2ShOp(N, Base, Offset, Opc);
128 // This always matches one way or another.
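// SelectCMOVPred - Package the condition-code immediate and the CPSR flags
// register as the two predicate operands for a conditional move.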
132 bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
133 const ConstantSDNode *CN = cast<ConstantSDNode>(N);
134 Pred = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
135 Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
139 bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
140 SDValue &Offset, SDValue &Opc);
141 bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
142 SDValue &Offset, SDValue &Opc);
143 bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
144 SDValue &Offset, SDValue &Opc);
145 bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
146 bool SelectAddrMode3(SDValue N, SDValue &Base,
147 SDValue &Offset, SDValue &Opc);
148 bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
149 SDValue &Offset, SDValue &Opc);
150 bool SelectAddrMode5(SDValue N, SDValue &Base,
152 bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
153 bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);
155 bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
157 // Thumb Addressing Modes:
158 bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
159 bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
161 bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
162 bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
163 bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
164 bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
166 bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
168 bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
170 bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
172 bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
174 // Thumb 2 Addressing Modes:
175 bool SelectT2ShifterOperandReg(SDValue N,
176 SDValue &BaseReg, SDValue &Opc);
177 bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
178 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
180 bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
182 bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
183 SDValue &OffReg, SDValue &ShImm);
184 bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);
186 inline bool is_so_imm(unsigned Imm) const {
187 return ARM_AM::getSOImmVal(Imm) != -1;
190 inline bool is_so_imm_not(unsigned Imm) const {
191 return ARM_AM::getSOImmVal(~Imm) != -1;
194 inline bool is_t2_so_imm(unsigned Imm) const {
195 return ARM_AM::getT2SOImmVal(Imm) != -1;
198 inline bool is_t2_so_imm_not(unsigned Imm) const {
199 return ARM_AM::getT2SOImmVal(~Imm) != -1;
202 // Include the pieces autogenerated from the target description.
203 #include "ARMGenDAGISel.inc"
/// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for ARM.
208 SDNode *SelectARMIndexedLoad(SDNode *N);
209 SDNode *SelectT2IndexedLoad(SDNode *N);
211 /// SelectVLD - Select NEON load intrinsics. NumVecs should be
212 /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
213 /// loads of D registers and even subregs and odd subregs of Q registers.
214 /// For NumVecs <= 2, QOpcodes1 is not used.
215 SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
216 const uint16_t *DOpcodes,
217 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
219 /// SelectVST - Select NEON store intrinsics. NumVecs should
220 /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
221 /// stores of D registers and even subregs and odd subregs of Q registers.
222 /// For NumVecs <= 2, QOpcodes1 is not used.
223 SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
224 const uint16_t *DOpcodes,
225 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
227 /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
228 /// be 2, 3 or 4. The opcode arrays specify the instructions used for
229 /// load/store of D registers and Q registers.
230 SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
231 bool isUpdating, unsigned NumVecs,
232 const uint16_t *DOpcodes, const uint16_t *QOpcodes);
234 /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
235 /// should be 2, 3 or 4. The opcode array specifies the instructions used
236 /// for loading D registers. (Q registers are not supported.)
237 SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
238 const uint16_t *Opcodes);
240 /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
241 /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
242 /// generated to force the table registers to be consecutive.
243 SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
245 /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
246 SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
// Select special operations if the node forms an integer ABS pattern.
249 SDNode *SelectABSOp(SDNode *N);
251 SDNode *SelectInlineAsm(SDNode *N);
253 SDNode *SelectConcatVector(SDNode *N);
255 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
256 /// inline asm expressions.
257 bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
258 std::vector<SDValue> &OutOps) override;
260 // Form pairs of consecutive R, S, D, or Q registers.
261 SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
262 SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
263 SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
264 SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);
266 // Form sequences of 4 consecutive S, D, or Q registers.
267 SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
268 SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
269 SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
271 // Get the alignment operand for a NEON VLD or VST instruction.
272 SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
276 /// isInt32Immediate - This method tests to see if the node is a 32-bit constant
277 /// operand. If so Imm will receive the 32-bit value.
278 static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
279 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
280 Imm = cast<ConstantSDNode>(N)->getZExtValue();
// isInt32Immediate - This method tests to see if the value is a 32-bit constant
// operand. If so, Imm will receive the 32-bit value.
288 static bool isInt32Immediate(SDValue N, unsigned &Imm) {
289 return isInt32Immediate(N.getNode(), Imm);
// isOpcWithIntImmediate - This method tests to see if the node has a specific
// opcode and an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
295 static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
296 return N->getOpcode() == Opc &&
297 isInt32Immediate(N->getOperand(1).getNode(), Imm);
/// \brief Check whether a particular node is a constant value representable as
/// (N * \p Scale) where N is in the range [\p RangeMin, \p RangeMax).
303 /// \param ScaledConstant [out] - On success, the pre-scaled constant value.
304 static bool isScaledConstantInRange(SDValue Node, int Scale,
305 int RangeMin, int RangeMax,
306 int &ScaledConstant) {
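// For example, with Scale = 4, RangeMin = -256 and RangeMax = 256, this accepts
// a constant that is a multiple of 4 in [-1024, 1020] and returns Constant / 4.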
307 assert(Scale > 0 && "Invalid scale!");
309 // Check that this is a constant.
310 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
314 ScaledConstant = (int) C->getZExtValue();
315 if ((ScaledConstant % Scale) != 0)
318 ScaledConstant /= Scale;
319 return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
322 void ARMDAGToDAGISel::PreprocessISelDAG() {
323 if (!Subtarget->hasV6T2Ops())
326 bool isThumb2 = Subtarget->isThumb();
327 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
328 E = CurDAG->allnodes_end(); I != E; ) {
SDNode *N = I++; // Advance the iterator before any DAG updates to avoid invalidation issues.
331 if (N->getOpcode() != ISD::ADD)
// Look for (add X1, (and (srl X2, c1), c2)) where c2 is a constant with
// leading zeros, followed by consecutive set bits, followed by 1 or 2
// trailing zeros, e.g. 1020.
// Transform the expression to
// (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
// of trailing zeros of c2. The left shift would be folded as a shifter
// operand of 'add' and the 'and' and 'srl' would become a bit-extraction
// node (UBFX).
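// For example, with c2 = 1020 = 0xFF << 2 (so tz = 2):
//   (add X1, (and (srl X2, c1), 1020))
//     -> (add X1, (shl (and (srl X2, c1), 255), 2))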
343 SDValue N0 = N->getOperand(0);
344 SDValue N1 = N->getOperand(1);
345 unsigned And_imm = 0;
346 if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
347 if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
353 // Check if the AND mask is an immediate of the form: 000.....1111111100
354 unsigned TZ = countTrailingZeros(And_imm);
355 if (TZ != 1 && TZ != 2)
// Be conservative here. Shifter operands aren't always free; e.g. on
// Swift, left shifter operands of 1 / 2 are free but others are not.
359 // ubfx r3, r1, #16, #8
360 // ldr.w r3, [r0, r3, lsl #2]
363 // and.w r2, r9, r1, lsr #14
367 if (And_imm & (And_imm + 1))
370 // Look for (and (srl X, c1), c2).
371 SDValue Srl = N1.getOperand(0);
372 unsigned Srl_imm = 0;
373 if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
// Make sure the first operand is not a shifter operand, which would prevent
// folding of the left shift.
383 if (SelectT2ShifterOperandReg(N0, CPTmp0, CPTmp1))
386 if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
387 SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
391 // Now make the transformation.
392 Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
394 CurDAG->getConstant(Srl_imm+TZ, MVT::i32));
395 N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
396 Srl, CurDAG->getConstant(And_imm, MVT::i32));
397 N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
398 N1, CurDAG->getConstant(TZ, MVT::i32));
399 CurDAG->UpdateNodeOperands(N, N0, N1);
403 /// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
404 /// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
406 bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
407 if (OptLevel == CodeGenOpt::None)
410 if (!CheckVMLxHazard)
413 if (!Subtarget->isCortexA7() && !Subtarget->isCortexA8() &&
414 !Subtarget->isCortexA9() && !Subtarget->isSwift())
420 SDNode *Use = *N->use_begin();
421 if (Use->getOpcode() == ISD::CopyToReg)
423 if (Use->isMachineOpcode()) {
424 const ARMBaseInstrInfo *TII =
425 static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());
427 const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
430 unsigned Opcode = MCID.getOpcode();
431 if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
// It is very costly if the use is a vmlx feeding into another vmlx. We
// actually want to unfold the use later in the MLxExpansion pass. e.g.
436 // vmla (stall 8 cycles)
441 // This adds up to about 18 - 19 cycles.
444 // vmul (stall 4 cycles)
// vadd
// This adds up to about 14 cycles.
446 return TII->isFpMLxInstruction(Opcode);
452 bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
453 ARM_AM::ShiftOpc ShOpcVal,
455 if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
457 if (Shift.hasOneUse())
460 return ShOpcVal == ARM_AM::lsl &&
461 (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
464 bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
467 bool CheckProfitability) {
468 if (DisableShifterOp)
471 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
// Don't match the base-register-only case. That is matched by a separate,
// lower-complexity pattern with an explicit register operand.
475 if (ShOpcVal == ARM_AM::no_shift) return false;
477 BaseReg = N.getOperand(0);
478 unsigned ShImmVal = 0;
479 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
480 if (!RHS) return false;
481 ShImmVal = RHS->getZExtValue() & 31;
482 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
487 bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
491 bool CheckProfitability) {
492 if (DisableShifterOp)
495 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
// Don't match the base-register-only case. That is matched by a separate,
// lower-complexity pattern with an explicit register operand.
499 if (ShOpcVal == ARM_AM::no_shift) return false;
501 BaseReg = N.getOperand(0);
502 unsigned ShImmVal = 0;
503 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
504 if (RHS) return false;
506 ShReg = N.getOperand(1);
507 if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
509 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
515 bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
518 // Match simple R + imm12 operands.
521 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
522 !CurDAG->isBaseWithConstantOffset(N)) {
523 if (N.getOpcode() == ISD::FrameIndex) {
524 // Match frame index.
525 int FI = cast<FrameIndexSDNode>(N)->getIndex();
526 Base = CurDAG->getTargetFrameIndex(FI,
527 getTargetLowering()->getPointerTy());
528 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
532 if (N.getOpcode() == ARMISD::Wrapper &&
533 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
534 Base = N.getOperand(0);
537 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
541 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
542 int RHSC = (int)RHS->getZExtValue();
543 if (N.getOpcode() == ISD::SUB)
546 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
547 Base = N.getOperand(0);
548 if (Base.getOpcode() == ISD::FrameIndex) {
549 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
550 Base = CurDAG->getTargetFrameIndex(FI,
551 getTargetLowering()->getPointerTy());
553 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
560 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
566 bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
568 if (N.getOpcode() == ISD::MUL &&
569 ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
570 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
571 // X * [3,5,9] -> X + X * [2,4,8] etc.
572 int RHSC = (int)RHS->getZExtValue();
575 ARM_AM::AddrOpc AddSub = ARM_AM::add;
577 AddSub = ARM_AM::sub;
580 if (isPowerOf2_32(RHSC)) {
581 unsigned ShAmt = Log2_32(RHSC);
582 Base = Offset = N.getOperand(0);
583 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
592 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
593 // ISD::OR that is equivalent to an ISD::ADD.
594 !CurDAG->isBaseWithConstantOffset(N))
597 // Leave simple R +/- imm12 operands for LDRi12
598 if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
600 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
601 -0x1000+1, 0x1000, RHSC)) // 12 bits.
605 // Otherwise this is R +/- [possibly shifted] R.
606 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
607 ARM_AM::ShiftOpc ShOpcVal =
608 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
611 Base = N.getOperand(0);
612 Offset = N.getOperand(1);
614 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant. If not, we can't fold it.
617 if (ConstantSDNode *Sh =
618 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
619 ShAmt = Sh->getZExtValue();
620 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
621 Offset = N.getOperand(1).getOperand(0);
624 ShOpcVal = ARM_AM::no_shift;
627 ShOpcVal = ARM_AM::no_shift;
631 // Try matching (R shl C) + (R).
632 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
633 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
634 N.getOperand(0).hasOneUse())) {
635 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
636 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant. If not, we can't fold it.
639 if (ConstantSDNode *Sh =
640 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
641 ShAmt = Sh->getZExtValue();
642 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
643 Offset = N.getOperand(0).getOperand(0);
644 Base = N.getOperand(1);
647 ShOpcVal = ARM_AM::no_shift;
650 ShOpcVal = ARM_AM::no_shift;
655 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
663 AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
667 if (N.getOpcode() == ISD::MUL &&
668 (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
669 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
670 // X * [3,5,9] -> X + X * [2,4,8] etc.
671 int RHSC = (int)RHS->getZExtValue();
674 ARM_AM::AddrOpc AddSub = ARM_AM::add;
676 AddSub = ARM_AM::sub;
679 if (isPowerOf2_32(RHSC)) {
680 unsigned ShAmt = Log2_32(RHSC);
681 Base = Offset = N.getOperand(0);
682 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
691 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
692 // ISD::OR that is equivalent to an ADD.
693 !CurDAG->isBaseWithConstantOffset(N)) {
695 if (N.getOpcode() == ISD::FrameIndex) {
696 int FI = cast<FrameIndexSDNode>(N)->getIndex();
697 Base = CurDAG->getTargetFrameIndex(FI,
698 getTargetLowering()->getPointerTy());
699 } else if (N.getOpcode() == ARMISD::Wrapper &&
700 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
701 Base = N.getOperand(0);
703 Offset = CurDAG->getRegister(0, MVT::i32);
704 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
710 // Match simple R +/- imm12 operands.
711 if (N.getOpcode() != ISD::SUB) {
713 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
714 -0x1000+1, 0x1000, RHSC)) { // 12 bits.
715 Base = N.getOperand(0);
716 if (Base.getOpcode() == ISD::FrameIndex) {
717 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
718 Base = CurDAG->getTargetFrameIndex(FI,
719 getTargetLowering()->getPointerTy());
721 Offset = CurDAG->getRegister(0, MVT::i32);
723 ARM_AM::AddrOpc AddSub = ARM_AM::add;
725 AddSub = ARM_AM::sub;
728 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
735 if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
736 // Compute R +/- (R << N) and reuse it.
738 Offset = CurDAG->getRegister(0, MVT::i32);
739 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
745 // Otherwise this is R +/- [possibly shifted] R.
746 ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
747 ARM_AM::ShiftOpc ShOpcVal =
748 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
751 Base = N.getOperand(0);
752 Offset = N.getOperand(1);
754 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant. If not, we can't fold it.
757 if (ConstantSDNode *Sh =
758 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
759 ShAmt = Sh->getZExtValue();
760 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
761 Offset = N.getOperand(1).getOperand(0);
764 ShOpcVal = ARM_AM::no_shift;
767 ShOpcVal = ARM_AM::no_shift;
771 // Try matching (R shl C) + (R).
772 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
773 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
774 N.getOperand(0).hasOneUse())) {
775 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
776 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant. If not, we can't fold it.
779 if (ConstantSDNode *Sh =
780 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
781 ShAmt = Sh->getZExtValue();
782 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
783 Offset = N.getOperand(0).getOperand(0);
784 Base = N.getOperand(1);
787 ShOpcVal = ARM_AM::no_shift;
790 ShOpcVal = ARM_AM::no_shift;
795 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
800 bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
801 SDValue &Offset, SDValue &Opc) {
802 unsigned Opcode = Op->getOpcode();
803 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
804 ? cast<LoadSDNode>(Op)->getAddressingMode()
805 : cast<StoreSDNode>(Op)->getAddressingMode();
806 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
807 ? ARM_AM::add : ARM_AM::sub;
809 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
813 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
815 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant. If not, we can't fold it.
818 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
819 ShAmt = Sh->getZExtValue();
820 if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
821 Offset = N.getOperand(0);
824 ShOpcVal = ARM_AM::no_shift;
827 ShOpcVal = ARM_AM::no_shift;
831 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
836 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
837 SDValue &Offset, SDValue &Opc) {
838 unsigned Opcode = Op->getOpcode();
839 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
840 ? cast<LoadSDNode>(Op)->getAddressingMode()
841 : cast<StoreSDNode>(Op)->getAddressingMode();
842 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
843 ? ARM_AM::add : ARM_AM::sub;
845 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
846 if (AddSub == ARM_AM::sub) Val *= -1;
847 Offset = CurDAG->getRegister(0, MVT::i32);
848 Opc = CurDAG->getTargetConstant(Val, MVT::i32);
856 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
857 SDValue &Offset, SDValue &Opc) {
858 unsigned Opcode = Op->getOpcode();
859 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
860 ? cast<LoadSDNode>(Op)->getAddressingMode()
861 : cast<StoreSDNode>(Op)->getAddressingMode();
862 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
863 ? ARM_AM::add : ARM_AM::sub;
865 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
866 Offset = CurDAG->getRegister(0, MVT::i32);
867 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
876 bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
881 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
882 SDValue &Base, SDValue &Offset,
884 if (N.getOpcode() == ISD::SUB) {
// X - C is canonicalized to X + -C; no need to handle it here.
886 Base = N.getOperand(0);
887 Offset = N.getOperand(1);
888 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
892 if (!CurDAG->isBaseWithConstantOffset(N)) {
894 if (N.getOpcode() == ISD::FrameIndex) {
895 int FI = cast<FrameIndexSDNode>(N)->getIndex();
896 Base = CurDAG->getTargetFrameIndex(FI,
897 getTargetLowering()->getPointerTy());
899 Offset = CurDAG->getRegister(0, MVT::i32);
900 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
904 // If the RHS is +/- imm8, fold into addr mode.
906 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
907 -256 + 1, 256, RHSC)) { // 8 bits.
908 Base = N.getOperand(0);
909 if (Base.getOpcode() == ISD::FrameIndex) {
910 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
911 Base = CurDAG->getTargetFrameIndex(FI,
912 getTargetLowering()->getPointerTy());
914 Offset = CurDAG->getRegister(0, MVT::i32);
916 ARM_AM::AddrOpc AddSub = ARM_AM::add;
918 AddSub = ARM_AM::sub;
921 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
925 Base = N.getOperand(0);
926 Offset = N.getOperand(1);
927 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
931 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
932 SDValue &Offset, SDValue &Opc) {
933 unsigned Opcode = Op->getOpcode();
934 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
935 ? cast<LoadSDNode>(Op)->getAddressingMode()
936 : cast<StoreSDNode>(Op)->getAddressingMode();
937 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
938 ? ARM_AM::add : ARM_AM::sub;
if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
941 Offset = CurDAG->getRegister(0, MVT::i32);
942 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
947 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
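// SelectAddrMode5 - Match a VFP load/store (addrmode5) address: a base register
// or frame index plus a multiple-of-4 immediate offset in [-1020, 1020].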
951 bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
952 SDValue &Base, SDValue &Offset) {
953 if (!CurDAG->isBaseWithConstantOffset(N)) {
955 if (N.getOpcode() == ISD::FrameIndex) {
956 int FI = cast<FrameIndexSDNode>(N)->getIndex();
957 Base = CurDAG->getTargetFrameIndex(FI,
958 getTargetLowering()->getPointerTy());
959 } else if (N.getOpcode() == ARMISD::Wrapper &&
960 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
961 Base = N.getOperand(0);
963 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
968 // If the RHS is +/- imm8, fold into addr mode.
970 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
971 -256 + 1, 256, RHSC)) {
972 Base = N.getOperand(0);
973 if (Base.getOpcode() == ISD::FrameIndex) {
974 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
975 Base = CurDAG->getTargetFrameIndex(FI,
976 getTargetLowering()->getPointerTy());
979 ARM_AM::AddrOpc AddSub = ARM_AM::add;
981 AddSub = ARM_AM::sub;
984 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
990 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
995 bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
999 unsigned Alignment = 0;
1000 if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
1001 // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
1002 // The maximum alignment is equal to the memory size being referenced.
1003 unsigned LSNAlign = LSN->getAlignment();
1004 unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
1005 if (LSNAlign >= MemSize && MemSize > 1)
1006 Alignment = MemSize;
1008 // All other uses of addrmode6 are for intrinsics. For now just record
1009 // the raw alignment value; it will be refined later based on the legal
1010 // alignment operands for the intrinsic.
1011 Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
1014 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
1018 bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
1020 LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
1021 ISD::MemIndexedMode AM = LdSt->getAddressingMode();
1022 if (AM != ISD::POST_INC)
1025 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
1026 if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
1027 Offset = CurDAG->getRegister(0, MVT::i32);
1032 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
1033 SDValue &Offset, SDValue &Label) {
1034 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
1035 Offset = N.getOperand(0);
1036 SDValue N1 = N.getOperand(1);
1037 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
1046 //===----------------------------------------------------------------------===//
1047 // Thumb Addressing Modes
1048 //===----------------------------------------------------------------------===//
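// SelectThumbAddrModeRR - Match a Thumb [reg + reg] address.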
1050 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
1051 SDValue &Base, SDValue &Offset){
1052 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
1053 ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
1054 if (!NC || !NC->isNullValue())
1061 Base = N.getOperand(0);
1062 Offset = N.getOperand(1);
1067 ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
1068 SDValue &Offset, unsigned Scale) {
1070 SDValue TmpBase, TmpOffImm;
1071 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1072 return false; // We want to select tLDRspi / tSTRspi instead.
1074 if (N.getOpcode() == ARMISD::Wrapper &&
1075 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1076 return false; // We want to select tLDRpci instead.
1079 if (!CurDAG->isBaseWithConstantOffset(N))
1082 // Thumb does not have [sp, r] address mode.
1083 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1084 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1085 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1086 (RHSR && RHSR->getReg() == ARM::SP))
1089 // FIXME: Why do we explicitly check for a match here and then return false?
// Presumably to allow something else to match, but shouldn't this be
// documented?
1093 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
1096 Base = N.getOperand(0);
1097 Offset = N.getOperand(1);
1102 ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
1105 return SelectThumbAddrModeRI(N, Base, Offset, 1);
1109 ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
1112 return SelectThumbAddrModeRI(N, Base, Offset, 2);
1116 ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
1119 return SelectThumbAddrModeRI(N, Base, Offset, 4);
1123 ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
1124 SDValue &Base, SDValue &OffImm) {
1126 SDValue TmpBase, TmpOffImm;
1127 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1128 return false; // We want to select tLDRspi / tSTRspi instead.
1130 if (N.getOpcode() == ARMISD::Wrapper &&
1131 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1132 return false; // We want to select tLDRpci instead.
1135 if (!CurDAG->isBaseWithConstantOffset(N)) {
1136 if (N.getOpcode() == ARMISD::Wrapper &&
1137 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
1138 Base = N.getOperand(0);
1143 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1147 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1148 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1149 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1150 (RHSR && RHSR->getReg() == ARM::SP)) {
1151 ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
1152 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1153 unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
1154 unsigned RHSC = RHS ? RHS->getZExtValue() : 0;
1156 // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
1157 if (LHSC != 0 || RHSC != 0) return false;
1160 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1164 // If the RHS is + imm5 * scale, fold into addr mode.
1166 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
1167 Base = N.getOperand(0);
1168 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1172 Base = N.getOperand(0);
1173 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1178 ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
1180 return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
1184 ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
1186 return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
1190 ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
1192 return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
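// SelectThumbAddrModeSP - Match an SP-relative or frame-index address with an
// unsigned immediate offset that is a multiple of 4 (imm8 scaled by 4).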
1195 bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
1196 SDValue &Base, SDValue &OffImm) {
1197 if (N.getOpcode() == ISD::FrameIndex) {
1198 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1199 Base = CurDAG->getTargetFrameIndex(FI,
1200 getTargetLowering()->getPointerTy());
1201 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1205 if (!CurDAG->isBaseWithConstantOffset(N))
1208 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1209 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
1210 (LHSR && LHSR->getReg() == ARM::SP)) {
1211 // If the RHS is + imm8 * scale, fold into addr mode.
1213 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
1214 Base = N.getOperand(0);
1215 if (Base.getOpcode() == ISD::FrameIndex) {
1216 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1217 Base = CurDAG->getTargetFrameIndex(FI,
1218 getTargetLowering()->getPointerTy());
1220 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1229 //===----------------------------------------------------------------------===//
1230 // Thumb 2 Addressing Modes
1231 //===----------------------------------------------------------------------===//
1234 bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
1236 if (DisableShifterOp)
1239 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
// Don't match the base-register-only case. That is matched by a separate,
// lower-complexity pattern with an explicit register operand.
1243 if (ShOpcVal == ARM_AM::no_shift) return false;
1245 BaseReg = N.getOperand(0);
1246 unsigned ShImmVal = 0;
1247 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1248 ShImmVal = RHS->getZExtValue() & 31;
1249 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
1256 bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
1257 SDValue &Base, SDValue &OffImm) {
1258 // Match simple R + imm12 operands.
1261 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1262 !CurDAG->isBaseWithConstantOffset(N)) {
1263 if (N.getOpcode() == ISD::FrameIndex) {
1264 // Match frame index.
1265 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1266 Base = CurDAG->getTargetFrameIndex(FI,
1267 getTargetLowering()->getPointerTy());
1268 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1272 if (N.getOpcode() == ARMISD::Wrapper &&
1273 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
1274 Base = N.getOperand(0);
1275 if (Base.getOpcode() == ISD::TargetConstantPool)
1276 return false; // We want to select t2LDRpci instead.
1279 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1283 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1284 if (SelectT2AddrModeImm8(N, Base, OffImm))
1285 // Let t2LDRi8 handle (R - imm8).
1288 int RHSC = (int)RHS->getZExtValue();
1289 if (N.getOpcode() == ISD::SUB)
1292 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
1293 Base = N.getOperand(0);
1294 if (Base.getOpcode() == ISD::FrameIndex) {
1295 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1296 Base = CurDAG->getTargetFrameIndex(FI,
1297 getTargetLowering()->getPointerTy());
1299 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1306 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1310 bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
1311 SDValue &Base, SDValue &OffImm) {
1312 // Match simple R - imm8 operands.
1313 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1314 !CurDAG->isBaseWithConstantOffset(N))
1317 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1318 int RHSC = (int)RHS->getSExtValue();
1319 if (N.getOpcode() == ISD::SUB)
1322 if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
1323 Base = N.getOperand(0);
1324 if (Base.getOpcode() == ISD::FrameIndex) {
1325 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1326 Base = CurDAG->getTargetFrameIndex(FI,
1327 getTargetLowering()->getPointerTy());
1329 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
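// SelectT2AddrModeImm8Offset - Match the 8-bit offset operand of a pre/post-
// indexed Thumb2 load/store; the sign is taken from the parent's addressing mode.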
1337 bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
1339 unsigned Opcode = Op->getOpcode();
1340 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
1341 ? cast<LoadSDNode>(Op)->getAddressingMode()
1342 : cast<StoreSDNode>(Op)->getAddressingMode();
1344 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
1345 OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1346 ? CurDAG->getTargetConstant(RHSC, MVT::i32)
1347 : CurDAG->getTargetConstant(-RHSC, MVT::i32);
1354 bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
1356 SDValue &OffReg, SDValue &ShImm) {
1357 // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
1358 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
1361 // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
1362 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1363 int RHSC = (int)RHS->getZExtValue();
1364 if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
1366 else if (RHSC < 0 && RHSC >= -255) // 8 bits
1370 // Look for (R + R) or (R + (R << [1,2,3])).
1372 Base = N.getOperand(0);
1373 OffReg = N.getOperand(1);
1375 // Swap if it is ((R << c) + R).
1376 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
1377 if (ShOpcVal != ARM_AM::lsl) {
1378 ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
1379 if (ShOpcVal == ARM_AM::lsl)
1380 std::swap(Base, OffReg);
1383 if (ShOpcVal == ARM_AM::lsl) {
// Check to see if the RHS of the shift is a constant. If not, we can't fold it.
1386 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
1387 ShAmt = Sh->getZExtValue();
1388 if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
1389 OffReg = OffReg.getOperand(0);
1392 ShOpcVal = ARM_AM::no_shift;
1395 ShOpcVal = ARM_AM::no_shift;
1399 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
1404 bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
// This *must* succeed since it's used for the irreplaceable ldrex and strex
// instructions.
1409 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1411 if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
1414 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1418 uint32_t RHSC = (int)RHS->getZExtValue();
1419 if (RHSC > 1020 || RHSC % 4 != 0)
1422 Base = N.getOperand(0);
1423 if (Base.getOpcode() == ISD::FrameIndex) {
1424 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1425 Base = CurDAG->getTargetFrameIndex(FI, getTargetLowering()->getPointerTy());
1428 OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32);
1432 //===--------------------------------------------------------------------===//
/// getAL - Returns an ARMCC::AL immediate node.
1435 static inline SDValue getAL(SelectionDAG *CurDAG) {
1436 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
1439 SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
1440 LoadSDNode *LD = cast<LoadSDNode>(N);
1441 ISD::MemIndexedMode AM = LD->getAddressingMode();
1442 if (AM == ISD::UNINDEXED)
1445 EVT LoadedVT = LD->getMemoryVT();
1446 SDValue Offset, AMOpc;
1447 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1448 unsigned Opcode = 0;
1450 if (LoadedVT == MVT::i32 && isPre &&
1451 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1452 Opcode = ARM::LDR_PRE_IMM;
1454 } else if (LoadedVT == MVT::i32 && !isPre &&
1455 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1456 Opcode = ARM::LDR_POST_IMM;
1458 } else if (LoadedVT == MVT::i32 &&
1459 SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1460 Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
1463 } else if (LoadedVT == MVT::i16 &&
1464 SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1466 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
1467 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
1468 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
1469 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
1470 if (LD->getExtensionType() == ISD::SEXTLOAD) {
1471 if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1473 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
1477 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1479 Opcode = ARM::LDRB_PRE_IMM;
1480 } else if (!isPre &&
1481 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1483 Opcode = ARM::LDRB_POST_IMM;
1484 } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1486 Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
1492 if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
1493 SDValue Chain = LD->getChain();
1494 SDValue Base = LD->getBasePtr();
1495 SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
1496 CurDAG->getRegister(0, MVT::i32), Chain };
1497 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1498 MVT::i32, MVT::Other, Ops);
1500 SDValue Chain = LD->getChain();
1501 SDValue Base = LD->getBasePtr();
1502 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
1503 CurDAG->getRegister(0, MVT::i32), Chain };
1504 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1505 MVT::i32, MVT::Other, Ops);
1512 SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
1513 LoadSDNode *LD = cast<LoadSDNode>(N);
1514 ISD::MemIndexedMode AM = LD->getAddressingMode();
1515 if (AM == ISD::UNINDEXED)
1518 EVT LoadedVT = LD->getMemoryVT();
1519 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1521 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1522 unsigned Opcode = 0;
1524 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1525 switch (LoadedVT.getSimpleVT().SimpleTy) {
1527 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1531 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1533 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1538 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1540 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1549 SDValue Chain = LD->getChain();
1550 SDValue Base = LD->getBasePtr();
1551 SDValue Ops[]= { Base, Offset, getAL(CurDAG),
1552 CurDAG->getRegister(0, MVT::i32), Chain };
1553 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1560 /// \brief Form a GPRPair pseudo register from a pair of GPR regs.
1561 SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
1562 SDLoc dl(V0.getNode());
1564 CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
1565 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
1566 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
1567 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1568 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1571 /// \brief Form a D register from a pair of S registers.
1572 SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1573 SDLoc dl(V0.getNode());
1575 CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
1576 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1577 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1578 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1579 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1582 /// \brief Form a quad register from a pair of D registers.
1583 SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1584 SDLoc dl(V0.getNode());
1585 SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
1586 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1587 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1588 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1589 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1592 /// \brief Form 4 consecutive D registers from a pair of Q registers.
1593 SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1594 SDLoc dl(V0.getNode());
1595 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1596 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1597 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1598 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1599 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1602 /// \brief Form 4 consecutive S registers.
1603 SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
1604 SDValue V2, SDValue V3) {
1605 SDLoc dl(V0.getNode());
1607 CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
1608 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1609 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1610 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
1611 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
1612 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1613 V2, SubReg2, V3, SubReg3 };
1614 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1617 /// \brief Form 4 consecutive D registers.
1618 SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
1619 SDValue V2, SDValue V3) {
1620 SDLoc dl(V0.getNode());
1621 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1622 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1623 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1624 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
1625 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
1626 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1627 V2, SubReg2, V3, SubReg3 };
1628 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1631 /// \brief Form 4 consecutive Q registers.
1632 SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
1633 SDValue V2, SDValue V3) {
1634 SDLoc dl(V0.getNode());
1635 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
1636 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1637 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1638 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
1639 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
1640 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1641 V2, SubReg2, V3, SubReg3 };
1642 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1645 /// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1646 /// of a NEON VLD or VST instruction. The supported values depend on the
1647 /// number of registers being loaded.
1648 SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
1649 bool is64BitVector) {
1650 unsigned NumRegs = NumVecs;
1651 if (!is64BitVector && NumVecs < 3)
1654 unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1655 if (Alignment >= 32 && NumRegs == 4)
1657 else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
1659 else if (Alignment >= 8)
1664 return CurDAG->getTargetConstant(Alignment, MVT::i32);
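// isVLDfixed - Return true if Opc is a fixed-stride (immediate writeback) NEON
// VLD opcode.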
1667 static bool isVLDfixed(unsigned Opc)
1670 default: return false;
1671 case ARM::VLD1d8wb_fixed : return true;
1672 case ARM::VLD1d16wb_fixed : return true;
1673 case ARM::VLD1d64Qwb_fixed : return true;
1674 case ARM::VLD1d32wb_fixed : return true;
1675 case ARM::VLD1d64wb_fixed : return true;
1676 case ARM::VLD1d64TPseudoWB_fixed : return true;
1677 case ARM::VLD1d64QPseudoWB_fixed : return true;
1678 case ARM::VLD1q8wb_fixed : return true;
1679 case ARM::VLD1q16wb_fixed : return true;
1680 case ARM::VLD1q32wb_fixed : return true;
1681 case ARM::VLD1q64wb_fixed : return true;
1682 case ARM::VLD2d8wb_fixed : return true;
1683 case ARM::VLD2d16wb_fixed : return true;
1684 case ARM::VLD2d32wb_fixed : return true;
1685 case ARM::VLD2q8PseudoWB_fixed : return true;
1686 case ARM::VLD2q16PseudoWB_fixed : return true;
1687 case ARM::VLD2q32PseudoWB_fixed : return true;
1688 case ARM::VLD2DUPd8wb_fixed : return true;
1689 case ARM::VLD2DUPd16wb_fixed : return true;
1690 case ARM::VLD2DUPd32wb_fixed : return true;
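// isVSTfixed - Return true if Opc is a fixed-stride (immediate writeback) NEON
// VST opcode.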
1694 static bool isVSTfixed(unsigned Opc)
1697 default: return false;
1698 case ARM::VST1d8wb_fixed : return true;
1699 case ARM::VST1d16wb_fixed : return true;
1700 case ARM::VST1d32wb_fixed : return true;
1701 case ARM::VST1d64wb_fixed : return true;
1702 case ARM::VST1q8wb_fixed : return true;
1703 case ARM::VST1q16wb_fixed : return true;
1704 case ARM::VST1q32wb_fixed : return true;
1705 case ARM::VST1q64wb_fixed : return true;
1706 case ARM::VST1d64TPseudoWB_fixed : return true;
1707 case ARM::VST1d64QPseudoWB_fixed : return true;
1708 case ARM::VST2d8wb_fixed : return true;
1709 case ARM::VST2d16wb_fixed : return true;
1710 case ARM::VST2d32wb_fixed : return true;
1711 case ARM::VST2q8PseudoWB_fixed : return true;
1712 case ARM::VST2q16PseudoWB_fixed : return true;
1713 case ARM::VST2q32PseudoWB_fixed : return true;
1717 // Get the register stride update opcode of a VLD/VST instruction that
1718 // is otherwise equivalent to the given fixed stride updating instruction.
1719 static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
1720 assert((isVLDfixed(Opc) || isVSTfixed(Opc))
1721 && "Incorrect fixed stride updating instruction.");
1724 case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
1725 case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
1726 case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
1727 case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
1728 case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
1729 case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
1730 case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
1731 case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
1732 case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
1733 case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
1734 case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
1735 case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;
1737 case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
1738 case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
1739 case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
1740 case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
1741 case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
1742 case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
1743 case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
1744 case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
1745 case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
1746 case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;
1748 case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
1749 case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
1750 case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
1751 case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
1752 case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
1753 case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;
1755 case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
1756 case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
1757 case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
1758 case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
1759 case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
1760 case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;
1762 case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
1763 case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
1764 case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
1766 return Opc; // If not one we handle, return it unchanged.
1769 SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
1770 const uint16_t *DOpcodes,
1771 const uint16_t *QOpcodes0,
1772 const uint16_t *QOpcodes1) {
1773 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
1776 SDValue MemAddr, Align;
1777 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1778 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1781 SDValue Chain = N->getOperand(0);
1782 EVT VT = N->getValueType(0);
1783 bool is64BitVector = VT.is64BitVector();
1784 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1786 unsigned OpcodeIndex;
1787 switch (VT.getSimpleVT().SimpleTy) {
1788 default: llvm_unreachable("unhandled vld type");
1789 // Double-register operations:
1790 case MVT::v8i8: OpcodeIndex = 0; break;
1791 case MVT::v4i16: OpcodeIndex = 1; break;
1793 case MVT::v2i32: OpcodeIndex = 2; break;
1794 case MVT::v1i64: OpcodeIndex = 3; break;
1795 // Quad-register operations:
1796 case MVT::v16i8: OpcodeIndex = 0; break;
1797 case MVT::v8i16: OpcodeIndex = 1; break;
1799 case MVT::v4i32: OpcodeIndex = 2; break;
1800 case MVT::v2i64: OpcodeIndex = 3;
1801 assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
1809 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1812 ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
1814 std::vector<EVT> ResTys;
1815 ResTys.push_back(ResTy);
1817 ResTys.push_back(MVT::i32);
1818 ResTys.push_back(MVT::Other);
1820 SDValue Pred = getAL(CurDAG);
1821 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1823 SmallVector<SDValue, 7> Ops;
1825 // Double registers and VLD1/VLD2 quad registers are directly supported.
1826 if (is64BitVector || NumVecs <= 2) {
1827 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1828 QOpcodes0[OpcodeIndex]);
1829 Ops.push_back(MemAddr);
1830 Ops.push_back(Align);
1832 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1833 // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
1834 // case entirely when the rest are updated to that form, too.
1835 if ((NumVecs <= 2) && !isa<ConstantSDNode>(Inc.getNode()))
1836 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1837 // FIXME: We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
1838 // check for that explicitly too. Horribly hacky, but temporary.
1839 if ((NumVecs > 2 && !isVLDfixed(Opc)) ||
1840 !isa<ConstantSDNode>(Inc.getNode()))
1841 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1843 Ops.push_back(Pred);
1844 Ops.push_back(Reg0);
1845 Ops.push_back(Chain);
1846 VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1849 // Otherwise, quad registers are loaded with two separate instructions,
1850 // where one loads the even registers and the other loads the odd registers.
1851 EVT AddrTy = MemAddr.getValueType();
1853 // Load the even subregs. This is always an updating load, so that it
1854 // provides the address to the second load for the odd subregs.
1856 SDValue ImplDef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
1857 const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
1858 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1859 ResTy, AddrTy, MVT::Other, OpsA);
1860 Chain = SDValue(VLdA, 2);
1862 // Load the odd subregs.
1863 Ops.push_back(SDValue(VLdA, 1));
1864 Ops.push_back(Align);
1866 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1867 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1868 "only constant post-increment update allowed for VLD3/4");
1870 Ops.push_back(Reg0);
1872 Ops.push_back(SDValue(VLdA, 0));
1873 Ops.push_back(Pred);
1874 Ops.push_back(Reg0);
1875 Ops.push_back(Chain);
1876 VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
1879 // Transfer memoperands.
1880 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1881 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1882 cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
1887 // Extract out the subregisters.
1888 SDValue SuperReg = SDValue(VLd, 0);
1889 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1890 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1891 unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
1892 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1893 ReplaceUses(SDValue(N, Vec),
1894 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
1895 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
1897 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
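/// SelectVST - Select a NEON vector store (the vst1-vst4 intrinsics and the
/// post-incrementing VSTx_UPD nodes). The source vectors are packed into a
/// REG_SEQUENCE super-register; as with SelectVLD, VST3/VST4 of quad
/// registers are split into two machine nodes, one storing the even and one
/// storing the odd D subregisters.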
1901 SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
1902 const uint16_t *DOpcodes,
1903 const uint16_t *QOpcodes0,
1904 const uint16_t *QOpcodes1) {
1905 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1908 SDValue MemAddr, Align;
1909 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1910 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1911 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1914 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1915 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1917 SDValue Chain = N->getOperand(0);
1918 EVT VT = N->getOperand(Vec0Idx).getValueType();
1919 bool is64BitVector = VT.is64BitVector();
1920 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1922 unsigned OpcodeIndex;
1923 switch (VT.getSimpleVT().SimpleTy) {
1924 default: llvm_unreachable("unhandled vst type");
1925 // Double-register operations:
1926 case MVT::v8i8: OpcodeIndex = 0; break;
1927 case MVT::v4i16: OpcodeIndex = 1; break;
1929 case MVT::v2i32: OpcodeIndex = 2; break;
1930 case MVT::v1i64: OpcodeIndex = 3; break;
1931 // Quad-register operations:
1932 case MVT::v16i8: OpcodeIndex = 0; break;
1933 case MVT::v8i16: OpcodeIndex = 1; break;
1935 case MVT::v4i32: OpcodeIndex = 2; break;
1936 case MVT::v2i64: OpcodeIndex = 3;
1937 assert(NumVecs == 1 && "v2i64 type only supported for VST1");
1941 std::vector<EVT> ResTys;
1943 ResTys.push_back(MVT::i32);
1944 ResTys.push_back(MVT::Other);
1946 SDValue Pred = getAL(CurDAG);
1947 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1948 SmallVector<SDValue, 7> Ops;
1950 // Double registers and VST1/VST2 quad registers are directly supported.
1951 if (is64BitVector || NumVecs <= 2) {
1954 SrcReg = N->getOperand(Vec0Idx);
1955 } else if (is64BitVector) {
1956 // Form a REG_SEQUENCE to force register allocation.
1957 SDValue V0 = N->getOperand(Vec0Idx + 0);
1958 SDValue V1 = N->getOperand(Vec0Idx + 1);
1960 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
1962 SDValue V2 = N->getOperand(Vec0Idx + 2);
1963 // If it's a vst3, form a quad D-register and leave the last part as an undef.
1965 SDValue V3 = (NumVecs == 3)
1966 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1967 : N->getOperand(Vec0Idx + 3);
1968 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
1971 // Form a QQ register.
1972 SDValue Q0 = N->getOperand(Vec0Idx);
1973 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1974 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
1977 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1978 QOpcodes0[OpcodeIndex]);
1979 Ops.push_back(MemAddr);
1980 Ops.push_back(Align);
1982 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1983 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1984 // case entirely when the rest are updated to that form, too.
1985 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1986 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1987 // FIXME: We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
1988 // check for that explicitly too. Horribly hacky, but temporary.
1989 if (!isa<ConstantSDNode>(Inc.getNode()))
1991 else if (NumVecs > 2 && !isVSTfixed(Opc))
1992 Ops.push_back(Reg0);
1994 Ops.push_back(SrcReg);
1995 Ops.push_back(Pred);
1996 Ops.push_back(Reg0);
1997 Ops.push_back(Chain);
1998 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2000 // Transfer memoperands.
2001 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
2006 // Otherwise, quad registers are stored with two separate instructions,
2007 // where one stores the even registers and the other stores the odd registers.
2009 // Form the QQQQ REG_SEQUENCE.
2010 SDValue V0 = N->getOperand(Vec0Idx + 0);
2011 SDValue V1 = N->getOperand(Vec0Idx + 1);
2012 SDValue V2 = N->getOperand(Vec0Idx + 2);
2013 SDValue V3 = (NumVecs == 3)
2014 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2015 : N->getOperand(Vec0Idx + 3);
2016 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2018 // Store the even D registers. This is always an updating store, so that it
2019 // provides the address to the second store for the odd subregs.
2020 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
2021 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2022 MemAddr.getValueType(),
2024 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
2025 Chain = SDValue(VStA, 1);
2027 // Store the odd D registers.
2028 Ops.push_back(SDValue(VStA, 0));
2029 Ops.push_back(Align);
2031 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2032 assert(isa<ConstantSDNode>(Inc.getNode()) &&
2033 "only constant post-increment update allowed for VST3/4");
2035 Ops.push_back(Reg0);
2037 Ops.push_back(RegSeq);
2038 Ops.push_back(Pred);
2039 Ops.push_back(Reg0);
2040 Ops.push_back(Chain);
2041 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
2043 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
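/// SelectVLDSTLane - Select a single-lane NEON load or store (vld2lane /
/// vst2lane and friends, plus their post-incrementing forms). The vectors are
/// packed into a super-register, the lane number is passed as an immediate
/// operand, and for loads the results are extracted back out with
/// EXTRACT_SUBREG.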
2047 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
2048 bool isUpdating, unsigned NumVecs,
2049 const uint16_t *DOpcodes,
2050 const uint16_t *QOpcodes) {
2051 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
2054 SDValue MemAddr, Align;
2055 unsigned AddrOpIdx = isUpdating ? 1 : 2;
2056 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2057 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2060 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2061 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2063 SDValue Chain = N->getOperand(0);
2065 unsigned Lane = cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
2066 EVT VT = N->getOperand(Vec0Idx).getValueType();
2067 bool is64BitVector = VT.is64BitVector();
2069 unsigned Alignment = 0;
2071 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2072 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2073 if (Alignment > NumBytes)
2074 Alignment = NumBytes;
2075 if (Alignment < 8 && Alignment < NumBytes)
2077 // Alignment must be a power of two; make sure of that.
2078 Alignment = (Alignment & -Alignment);
2082 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2084 unsigned OpcodeIndex;
2085 switch (VT.getSimpleVT().SimpleTy) {
2086 default: llvm_unreachable("unhandled vld/vst lane type");
2087 // Double-register operations:
2088 case MVT::v8i8: OpcodeIndex = 0; break;
2089 case MVT::v4i16: OpcodeIndex = 1; break;
2091 case MVT::v2i32: OpcodeIndex = 2; break;
2092 // Quad-register operations:
2093 case MVT::v8i16: OpcodeIndex = 0; break;
2095 case MVT::v4i32: OpcodeIndex = 1; break;
2098 std::vector<EVT> ResTys;
2100 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2103 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
2104 MVT::i64, ResTyElts));
2107 ResTys.push_back(MVT::i32);
2108 ResTys.push_back(MVT::Other);
2110 SDValue Pred = getAL(CurDAG);
2111 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2113 SmallVector<SDValue, 8> Ops;
2114 Ops.push_back(MemAddr);
2115 Ops.push_back(Align);
2117 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2118 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
2122 SDValue V0 = N->getOperand(Vec0Idx + 0);
2123 SDValue V1 = N->getOperand(Vec0Idx + 1);
2126 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2128 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
2130 SDValue V2 = N->getOperand(Vec0Idx + 2);
2131 SDValue V3 = (NumVecs == 3)
2132 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2133 : N->getOperand(Vec0Idx + 3);
2135 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2137 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2139 Ops.push_back(SuperReg);
2140 Ops.push_back(getI32Imm(Lane));
2141 Ops.push_back(Pred);
2142 Ops.push_back(Reg0);
2143 Ops.push_back(Chain);
2145 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2146 QOpcodes[OpcodeIndex]);
2147 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2148 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
2152 // Extract the subregisters.
2153 SuperReg = SDValue(VLdLn, 0);
2154 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
2155 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
2156 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
2157 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2158 ReplaceUses(SDValue(N, Vec),
2159 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2160 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
2162 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
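/// SelectVLDDup - Select a NEON load-and-duplicate (VLDxDUP) node, which loads
/// NumVecs elements and replicates each one across all lanes of its
/// destination D register. The result is produced as one wide register and
/// split into the individual D registers with EXTRACT_SUBREG.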
2166 SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
2168 const uint16_t *Opcodes) {
2169 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2172 SDValue MemAddr, Align;
2173 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
2176 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2177 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2179 SDValue Chain = N->getOperand(0);
2180 EVT VT = N->getValueType(0);
2182 unsigned Alignment = 0;
2184 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2185 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2186 if (Alignment > NumBytes)
2187 Alignment = NumBytes;
2188 if (Alignment < 8 && Alignment < NumBytes)
2190 // Alignment must be a power of two; make sure of that.
2191 Alignment = (Alignment & -Alignment);
2195 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2197 unsigned OpcodeIndex;
2198 switch (VT.getSimpleVT().SimpleTy) {
2199 default: llvm_unreachable("unhandled vld-dup type");
2200 case MVT::v8i8: OpcodeIndex = 0; break;
2201 case MVT::v4i16: OpcodeIndex = 1; break;
2203 case MVT::v2i32: OpcodeIndex = 2; break;
2206 SDValue Pred = getAL(CurDAG);
2207 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2209 unsigned Opc = Opcodes[OpcodeIndex];
2210 SmallVector<SDValue, 6> Ops;
2211 Ops.push_back(MemAddr);
2212 Ops.push_back(Align);
2214 // fixed-stride update instructions don't have an explicit writeback
2215 // operand. It's implicit in the opcode itself.
2216 SDValue Inc = N->getOperand(2);
2217 if (!isa<ConstantSDNode>(Inc.getNode()))
2219 // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
2220 else if (NumVecs > 2)
2221 Ops.push_back(Reg0);
2223 Ops.push_back(Pred);
2224 Ops.push_back(Reg0);
2225 Ops.push_back(Chain);
2227 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2228 std::vector<EVT> ResTys;
2229 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
2231 ResTys.push_back(MVT::i32);
2232 ResTys.push_back(MVT::Other);
2233 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2234 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
2235 SuperReg = SDValue(VLdDup, 0);
2237 // Extract the subregisters.
2238 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2239 unsigned SubIdx = ARM::dsub_0;
2240 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2241 ReplaceUses(SDValue(N, Vec),
2242 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
2243 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
2245 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
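/// SelectVTBL - Select a NEON table lookup (vtbl2-4) or table extension
/// (vtbx2-4) intrinsic. The table registers are packed into a REG_SEQUENCE so
/// that register allocation assigns the consecutive D registers the
/// instruction requires.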
2249 SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
2251 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2253 EVT VT = N->getValueType(0);
2254 unsigned FirstTblReg = IsExt ? 2 : 1;
2256 // Form a REG_SEQUENCE to force register allocation.
2258 SDValue V0 = N->getOperand(FirstTblReg + 0);
2259 SDValue V1 = N->getOperand(FirstTblReg + 1);
2261 RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
2263 SDValue V2 = N->getOperand(FirstTblReg + 2);
2264 // If it's a vtbl3, form a quad D-register and leave the last part as an undef.
2266 SDValue V3 = (NumVecs == 3)
2267 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2268 : N->getOperand(FirstTblReg + 3);
2269 RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2272 SmallVector<SDValue, 6> Ops;
2274 Ops.push_back(N->getOperand(1));
2275 Ops.push_back(RegSeq);
2276 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2277 Ops.push_back(getAL(CurDAG)); // predicate
2278 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
2279 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
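/// SelectV6T2BitfieldExtractOp - Try to match an and-of-shift or
/// shift-of-shift pattern to an SBFX/UBFX (or t2SBFX/t2UBFX) bitfield extract,
/// falling back to a plain right shift when the extracted field reaches the
/// top of the word. Only applicable when the subtarget has ARMv6T2 operations.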
2282 SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
2284 if (!Subtarget->hasV6T2Ops())
2287 unsigned Opc = isSigned
2288 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2289 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2291 // For unsigned extracts, check for a shift right and mask
2292 unsigned And_imm = 0;
2293 if (N->getOpcode() == ISD::AND) {
2294 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2296 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
2297 if (And_imm & (And_imm + 1))
2300 unsigned Srl_imm = 0;
2301 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
2303 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2305 // Note: The width operand is encoded as width-1.
2306 unsigned Width = CountTrailingOnes_32(And_imm) - 1;
2307 unsigned LSB = Srl_imm;
2309 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2311 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
2312 // It's cheaper to use a right shift to extract the top bits.
2313 if (Subtarget->isThumb()) {
2314 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
2315 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2316 CurDAG->getTargetConstant(LSB, MVT::i32),
2317 getAL(CurDAG), Reg0, Reg0 };
2318 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2321 // ARM models shift instructions as MOVsi with shifter operand.
2322 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
2324 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB),
2326 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
2327 getAL(CurDAG), Reg0, Reg0 };
2328 return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops, 5);
2331 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2332 CurDAG->getTargetConstant(LSB, MVT::i32),
2333 CurDAG->getTargetConstant(Width, MVT::i32),
2334 getAL(CurDAG), Reg0 };
2335 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2341 // Otherwise, we're looking for a shift of a shift
2342 unsigned Shl_imm = 0;
2343 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2344 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2345 unsigned Srl_imm = 0;
2346 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2347 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2348 // Note: The width operand is encoded as width-1.
2349 unsigned Width = 32 - Srl_imm - 1;
2350 int LSB = Srl_imm - Shl_imm;
2353 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2354 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2355 CurDAG->getTargetConstant(LSB, MVT::i32),
2356 CurDAG->getTargetConstant(Width, MVT::i32),
2357 getAL(CurDAG), Reg0 };
2358 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2364 /// Target-specific DAG combining for ISD::XOR.
2365 /// Target-independent combining lowers SELECT_CC nodes of the form
2366 /// select_cc setg[ge] X, 0, X, -X
2367 /// select_cc setgt X, -1, X, -X
2368 /// select_cc setl[te] X, 0, -X, X
2369 /// select_cc setlt X, 1, -X, X
2370 /// which represent Integer ABS into:
2371 /// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2372 /// ARM instruction selection detects the latter and matches it to
2373 /// ARM::ABS or ARM::t2ABS machine node.
2374 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
2375 SDValue XORSrc0 = N->getOperand(0);
2376 SDValue XORSrc1 = N->getOperand(1);
2377 EVT VT = N->getValueType(0);
2379 if (Subtarget->isThumb1Only())
2382 if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
2385 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2386 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2387 SDValue SRASrc0 = XORSrc1.getOperand(0);
2388 SDValue SRASrc1 = XORSrc1.getOperand(1);
2389 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2390 EVT XType = SRASrc0.getValueType();
2391 unsigned Size = XType.getSizeInBits() - 1;
2393 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
2394 XType.isInteger() && SRAConstant != NULL &&
2395 Size == SRAConstant->getZExtValue()) {
2396 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
2397 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
2403 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2404 // The only time a CONCAT_VECTORS operation can have legal types is when
2405 // two 64-bit vectors are concatenated to a 128-bit vector.
2406 EVT VT = N->getValueType(0);
2407 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2408 llvm_unreachable("unexpected CONCAT_VECTORS");
2409 return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
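/// Select - Main instruction-selection hook. Handles the cases that need
/// custom C++ selection (constants, frame indices, NEON loads/stores, long
/// multiplies, inline asm, etc.) and defers everything else to the
/// TableGen-generated SelectCode.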
2412 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2415 if (N->isMachineOpcode()) {
2417 return NULL; // Already selected.
2420 switch (N->getOpcode()) {
2422 case ISD::INLINEASM: {
2423 SDNode *ResNode = SelectInlineAsm(N);
2429 // Select special operations if XOR node forms integer ABS pattern
2430 SDNode *ResNode = SelectABSOp(N);
2433 // Other cases are autogenerated.
2436 case ISD::Constant: {
2437 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2439 if (Subtarget->useMovt())
2440 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2441 // be done with MOV + MOVT, at worst.
2444 if (Subtarget->isThumb()) {
2445 UseCP = (Val > 255 && // MOV
2446 ~Val > 255 && // MOV + MVN
2447 !ARM_AM::isThumbImmShiftedVal(Val) && // MOV + LSL
2448 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2450 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
2451 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2452 !ARM_AM::isSOImmTwoPartVal(Val) && // two instrs.
2453 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2458 CurDAG->getTargetConstantPool(ConstantInt::get(
2459 Type::getInt32Ty(*CurDAG->getContext()), Val),
2460 getTargetLowering()->getPointerTy());
2463 if (Subtarget->isThumb()) {
2464 SDValue Pred = getAL(CurDAG);
2465 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2466 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2467 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2472 CurDAG->getTargetConstant(0, MVT::i32),
2474 CurDAG->getRegister(0, MVT::i32),
2475 CurDAG->getEntryNode()
2477 ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2480 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2484 // Other cases are autogenerated.
2487 case ISD::FrameIndex: {
2488 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2489 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2490 SDValue TFI = CurDAG->getTargetFrameIndex(FI,
2491 getTargetLowering()->getPointerTy());
2492 if (Subtarget->isThumb1Only()) {
2493 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2494 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2495 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
2497 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2498 ARM::t2ADDri : ARM::ADDri);
2499 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2500 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2501 CurDAG->getRegister(0, MVT::i32) };
2502 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2506 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2510 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2514 if (Subtarget->isThumb1Only())
2516 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2517 unsigned RHSV = C->getZExtValue();
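// Multiplying by a constant of the form 2^n+1 or 2^n-1 can be done with a
// single add/rsb using a shifted operand, e.g. x*9 = x + (x << 3) and
// x*7 = (x << 3) - x, which is what the two cases below select.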
2519 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2520 unsigned ShImm = Log2_32(RHSV-1);
2523 SDValue V = N->getOperand(0);
2524 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2525 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2526 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2527 if (Subtarget->isThumb()) {
2528 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2529 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
2531 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2532 return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
2535 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2536 unsigned ShImm = Log2_32(RHSV+1);
2539 SDValue V = N->getOperand(0);
2540 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2541 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2542 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2543 if (Subtarget->isThumb()) {
2544 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2545 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
2547 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2548 return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
2554 // Check for unsigned bitfield extract
2555 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2558 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
2559 // of c1 are 0xffff, and lower 16-bits of c2 are 0. That is, the top 16-bits
2560 // are entirely contributed by c2 and lower 16-bits are entirely contributed
2561 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
2562 // Select it to: "movt x, ((c1 & 0xffff0000) >> 16)"
2563 EVT VT = N->getValueType(0);
2566 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2568 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2571 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2572 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2575 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2576 SDValue N2 = N0.getOperand(1);
2577 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2580 unsigned N1CVal = N1C->getZExtValue();
2581 unsigned N2CVal = N2C->getZExtValue();
2582 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2583 (N1CVal & 0xffffU) == 0xffffU &&
2584 (N2CVal & 0xffffU) == 0x0U) {
2585 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2587 SDValue Ops[] = { N0.getOperand(0), Imm16,
2588 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2589 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2594 case ARMISD::VMOVRRD:
2595 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2596 N->getOperand(0), getAL(CurDAG),
2597 CurDAG->getRegister(0, MVT::i32));
2598 case ISD::UMUL_LOHI: {
2599 if (Subtarget->isThumb1Only())
2601 if (Subtarget->isThumb()) {
2602 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2603 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2604 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
2606 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2607 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2608 CurDAG->getRegister(0, MVT::i32) };
2609 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2610 ARM::UMULL : ARM::UMULLv5,
2611 dl, MVT::i32, MVT::i32, Ops);
2614 case ISD::SMUL_LOHI: {
2615 if (Subtarget->isThumb1Only())
2617 if (Subtarget->isThumb()) {
2618 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2619 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2620 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
2622 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2623 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2624 CurDAG->getRegister(0, MVT::i32) };
2625 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2626 ARM::SMULL : ARM::SMULLv5,
2627 dl, MVT::i32, MVT::i32, Ops);
2630 case ARMISD::UMLAL:{
2631 if (Subtarget->isThumb()) {
2632 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2633 N->getOperand(3), getAL(CurDAG),
2634 CurDAG->getRegister(0, MVT::i32)};
2635 return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
2637 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2638 N->getOperand(3), getAL(CurDAG),
2639 CurDAG->getRegister(0, MVT::i32),
2640 CurDAG->getRegister(0, MVT::i32) };
2641 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2642 ARM::UMLAL : ARM::UMLALv5,
2643 dl, MVT::i32, MVT::i32, Ops);
2646 case ARMISD::SMLAL:{
2647 if (Subtarget->isThumb()) {
2648 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2649 N->getOperand(3), getAL(CurDAG),
2650 CurDAG->getRegister(0, MVT::i32)};
2651 return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
2653 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2654 N->getOperand(3), getAL(CurDAG),
2655 CurDAG->getRegister(0, MVT::i32),
2656 CurDAG->getRegister(0, MVT::i32) };
2657 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2658 ARM::SMLAL : ARM::SMLALv5,
2659 dl, MVT::i32, MVT::i32, Ops);
2663 SDNode *ResNode = 0;
2664 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2665 ResNode = SelectT2IndexedLoad(N);
2667 ResNode = SelectARMIndexedLoad(N);
2670 // Other cases are autogenerated.
2673 case ARMISD::BRCOND: {
2674 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2675 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2676 // Pattern complexity = 6 cost = 1 size = 0
2678 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2679 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2680 // Pattern complexity = 6 cost = 1 size = 0
2682 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2683 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2684 // Pattern complexity = 6 cost = 1 size = 0
2686 unsigned Opc = Subtarget->isThumb() ?
2687 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2688 SDValue Chain = N->getOperand(0);
2689 SDValue N1 = N->getOperand(1);
2690 SDValue N2 = N->getOperand(2);
2691 SDValue N3 = N->getOperand(3);
2692 SDValue InFlag = N->getOperand(4);
2693 assert(N1.getOpcode() == ISD::BasicBlock);
2694 assert(N2.getOpcode() == ISD::Constant);
2695 assert(N3.getOpcode() == ISD::Register);
2697 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2698 cast<ConstantSDNode>(N2)->getZExtValue()),
2700 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2701 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2703 Chain = SDValue(ResNode, 0);
2704 if (N->getNumValues() == 2) {
2705 InFlag = SDValue(ResNode, 1);
2706 ReplaceUses(SDValue(N, 1), InFlag);
2708 ReplaceUses(SDValue(N, 0),
2709 SDValue(Chain.getNode(), Chain.getResNo()));
2712 case ARMISD::VZIP: {
2714 EVT VT = N->getValueType(0);
2715 switch (VT.getSimpleVT().SimpleTy) {
2716 default: return NULL;
2717 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2718 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2720 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2721 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2722 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2723 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2725 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2727 SDValue Pred = getAL(CurDAG);
2728 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2729 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2730 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2732 case ARMISD::VUZP: {
2734 EVT VT = N->getValueType(0);
2735 switch (VT.getSimpleVT().SimpleTy) {
2736 default: return NULL;
2737 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2738 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2740 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2741 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2742 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2743 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2745 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2747 SDValue Pred = getAL(CurDAG);
2748 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2749 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2750 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2752 case ARMISD::VTRN: {
2754 EVT VT = N->getValueType(0);
2755 switch (VT.getSimpleVT().SimpleTy) {
2756 default: return NULL;
2757 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2758 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2760 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2761 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2762 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2764 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2766 SDValue Pred = getAL(CurDAG);
2767 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2768 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2769 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2771 case ARMISD::BUILD_VECTOR: {
2772 EVT VecVT = N->getValueType(0);
2773 EVT EltVT = VecVT.getVectorElementType();
2774 unsigned NumElts = VecVT.getVectorNumElements();
2775 if (EltVT == MVT::f64) {
2776 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2777 return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2779 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2781 return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2782 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2783 return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
2784 N->getOperand(2), N->getOperand(3));
2787 case ARMISD::VLD2DUP: {
2788 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
2790 return SelectVLDDup(N, false, 2, Opcodes);
2793 case ARMISD::VLD3DUP: {
2794 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2795 ARM::VLD3DUPd16Pseudo,
2796 ARM::VLD3DUPd32Pseudo };
2797 return SelectVLDDup(N, false, 3, Opcodes);
2800 case ARMISD::VLD4DUP: {
2801 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2802 ARM::VLD4DUPd16Pseudo,
2803 ARM::VLD4DUPd32Pseudo };
2804 return SelectVLDDup(N, false, 4, Opcodes);
2807 case ARMISD::VLD2DUP_UPD: {
2808 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2809 ARM::VLD2DUPd16wb_fixed,
2810 ARM::VLD2DUPd32wb_fixed };
2811 return SelectVLDDup(N, true, 2, Opcodes);
2814 case ARMISD::VLD3DUP_UPD: {
2815 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2816 ARM::VLD3DUPd16Pseudo_UPD,
2817 ARM::VLD3DUPd32Pseudo_UPD };
2818 return SelectVLDDup(N, true, 3, Opcodes);
2821 case ARMISD::VLD4DUP_UPD: {
2822 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2823 ARM::VLD4DUPd16Pseudo_UPD,
2824 ARM::VLD4DUPd32Pseudo_UPD };
2825 return SelectVLDDup(N, true, 4, Opcodes);
2828 case ARMISD::VLD1_UPD: {
2829 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2830 ARM::VLD1d16wb_fixed,
2831 ARM::VLD1d32wb_fixed,
2832 ARM::VLD1d64wb_fixed };
2833 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2834 ARM::VLD1q16wb_fixed,
2835 ARM::VLD1q32wb_fixed,
2836 ARM::VLD1q64wb_fixed };
2837 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
2840 case ARMISD::VLD2_UPD: {
2841 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2842 ARM::VLD2d16wb_fixed,
2843 ARM::VLD2d32wb_fixed,
2844 ARM::VLD1q64wb_fixed};
2845 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2846 ARM::VLD2q16PseudoWB_fixed,
2847 ARM::VLD2q32PseudoWB_fixed };
2848 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
2851 case ARMISD::VLD3_UPD: {
2852 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2853 ARM::VLD3d16Pseudo_UPD,
2854 ARM::VLD3d32Pseudo_UPD,
2855 ARM::VLD1d64TPseudoWB_fixed};
2856 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2857 ARM::VLD3q16Pseudo_UPD,
2858 ARM::VLD3q32Pseudo_UPD };
2859 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2860 ARM::VLD3q16oddPseudo_UPD,
2861 ARM::VLD3q32oddPseudo_UPD };
2862 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2865 case ARMISD::VLD4_UPD: {
2866 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
2867 ARM::VLD4d16Pseudo_UPD,
2868 ARM::VLD4d32Pseudo_UPD,
2869 ARM::VLD1d64QPseudoWB_fixed};
2870 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2871 ARM::VLD4q16Pseudo_UPD,
2872 ARM::VLD4q32Pseudo_UPD };
2873 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2874 ARM::VLD4q16oddPseudo_UPD,
2875 ARM::VLD4q32oddPseudo_UPD };
2876 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2879 case ARMISD::VLD2LN_UPD: {
2880 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
2881 ARM::VLD2LNd16Pseudo_UPD,
2882 ARM::VLD2LNd32Pseudo_UPD };
2883 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2884 ARM::VLD2LNq32Pseudo_UPD };
2885 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2888 case ARMISD::VLD3LN_UPD: {
2889 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
2890 ARM::VLD3LNd16Pseudo_UPD,
2891 ARM::VLD3LNd32Pseudo_UPD };
2892 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
2893 ARM::VLD3LNq32Pseudo_UPD };
2894 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
2897 case ARMISD::VLD4LN_UPD: {
2898 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
2899 ARM::VLD4LNd16Pseudo_UPD,
2900 ARM::VLD4LNd32Pseudo_UPD };
2901 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
2902 ARM::VLD4LNq32Pseudo_UPD };
2903 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
2906 case ARMISD::VST1_UPD: {
2907 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
2908 ARM::VST1d16wb_fixed,
2909 ARM::VST1d32wb_fixed,
2910 ARM::VST1d64wb_fixed };
2911 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
2912 ARM::VST1q16wb_fixed,
2913 ARM::VST1q32wb_fixed,
2914 ARM::VST1q64wb_fixed };
2915 return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
2918 case ARMISD::VST2_UPD: {
2919 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
2920 ARM::VST2d16wb_fixed,
2921 ARM::VST2d32wb_fixed,
2922 ARM::VST1q64wb_fixed};
2923 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
2924 ARM::VST2q16PseudoWB_fixed,
2925 ARM::VST2q32PseudoWB_fixed };
2926 return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
2929 case ARMISD::VST3_UPD: {
2930 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
2931 ARM::VST3d16Pseudo_UPD,
2932 ARM::VST3d32Pseudo_UPD,
2933 ARM::VST1d64TPseudoWB_fixed};
2934 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
2935 ARM::VST3q16Pseudo_UPD,
2936 ARM::VST3q32Pseudo_UPD };
2937 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
2938 ARM::VST3q16oddPseudo_UPD,
2939 ARM::VST3q32oddPseudo_UPD };
2940 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2943 case ARMISD::VST4_UPD: {
2944 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
2945 ARM::VST4d16Pseudo_UPD,
2946 ARM::VST4d32Pseudo_UPD,
2947 ARM::VST1d64QPseudoWB_fixed};
2948 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
2949 ARM::VST4q16Pseudo_UPD,
2950 ARM::VST4q32Pseudo_UPD };
2951 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
2952 ARM::VST4q16oddPseudo_UPD,
2953 ARM::VST4q32oddPseudo_UPD };
2954 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2957 case ARMISD::VST2LN_UPD: {
2958 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
2959 ARM::VST2LNd16Pseudo_UPD,
2960 ARM::VST2LNd32Pseudo_UPD };
2961 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
2962 ARM::VST2LNq32Pseudo_UPD };
2963 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
2966 case ARMISD::VST3LN_UPD: {
2967 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
2968 ARM::VST3LNd16Pseudo_UPD,
2969 ARM::VST3LNd32Pseudo_UPD };
2970 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
2971 ARM::VST3LNq32Pseudo_UPD };
2972 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
2975 case ARMISD::VST4LN_UPD: {
2976 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
2977 ARM::VST4LNd16Pseudo_UPD,
2978 ARM::VST4LNd32Pseudo_UPD };
2979 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
2980 ARM::VST4LNq32Pseudo_UPD };
2981 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
2984 case ISD::INTRINSIC_VOID:
2985 case ISD::INTRINSIC_W_CHAIN: {
2986 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2991 case Intrinsic::arm_ldaexd:
2992 case Intrinsic::arm_ldrexd: {
2994 SDValue Chain = N->getOperand(0);
2995 SDValue MemAddr = N->getOperand(2);
2996 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
2998 bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
2999 unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
3000 : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
3002 // arm_ldrexd returns an i64 value in {i32, i32}
3003 std::vector<EVT> ResTys;
3005 ResTys.push_back(MVT::i32);
3006 ResTys.push_back(MVT::i32);
3008 ResTys.push_back(MVT::Untyped);
3009 ResTys.push_back(MVT::Other);
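// t2LDREXD/t2LDAEXD define two separate i32 results, whereas the ARM-mode
// instructions define a single GPRPair (Untyped) result that is split into
// its halves below with EXTRACT_SUBREG on gsub_0/gsub_1.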
3011 // Place arguments in the right order.
3012 SmallVector<SDValue, 7> Ops;
3013 Ops.push_back(MemAddr);
3014 Ops.push_back(getAL(CurDAG));
3015 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3016 Ops.push_back(Chain);
3017 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3018 // Transfer memoperands.
3019 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3020 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3021 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
3024 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
3025 if (!SDValue(N, 0).use_empty()) {
3028 Result = SDValue(Ld, 0);
3030 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
3031 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3032 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3033 Result = SDValue(ResNode,0);
3035 ReplaceUses(SDValue(N, 0), Result);
3037 if (!SDValue(N, 1).use_empty()) {
3040 Result = SDValue(Ld, 1);
3042 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
3043 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3044 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3045 Result = SDValue(ResNode,0);
3047 ReplaceUses(SDValue(N, 1), Result);
3049 ReplaceUses(SDValue(N, 2), OutChain);
3052 case Intrinsic::arm_stlexd:
3053 case Intrinsic::arm_strexd: {
3055 SDValue Chain = N->getOperand(0);
3056 SDValue Val0 = N->getOperand(2);
3057 SDValue Val1 = N->getOperand(3);
3058 SDValue MemAddr = N->getOperand(4);
3060 // Store exclusive double returns an i32 value which is the status of the
3061 // issued store.
3062 EVT ResTys[] = { MVT::i32, MVT::Other };
3064 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3065 // Place arguments in the right order.
3066 SmallVector<SDValue, 7> Ops;
3068 Ops.push_back(Val0);
3069 Ops.push_back(Val1);
3071 // arm_strexd uses GPRPair.
3072 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
3073 Ops.push_back(MemAddr);
3074 Ops.push_back(getAL(CurDAG));
3075 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3076 Ops.push_back(Chain);
3078 bool IsRelease = IntNo == Intrinsic::arm_stlexd;
3079 unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
3080 : (IsRelease ? ARM::STLEXD : ARM::STREXD);
3082 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3083 // Transfer memoperands.
3084 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3085 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3086 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
3091 case Intrinsic::arm_neon_vld1: {
3092 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3093 ARM::VLD1d32, ARM::VLD1d64 };
3094 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3095 ARM::VLD1q32, ARM::VLD1q64};
3096 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
3099 case Intrinsic::arm_neon_vld2: {
3100 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3101 ARM::VLD2d32, ARM::VLD1q64 };
3102 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3103 ARM::VLD2q32Pseudo };
3104 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
3107 case Intrinsic::arm_neon_vld3: {
3108 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
3111 ARM::VLD1d64TPseudo };
3112 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3113 ARM::VLD3q16Pseudo_UPD,
3114 ARM::VLD3q32Pseudo_UPD };
3115 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3116 ARM::VLD3q16oddPseudo,
3117 ARM::VLD3q32oddPseudo };
3118 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3121 case Intrinsic::arm_neon_vld4: {
3122 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
3125 ARM::VLD1d64QPseudo };
3126 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3127 ARM::VLD4q16Pseudo_UPD,
3128 ARM::VLD4q32Pseudo_UPD };
3129 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3130 ARM::VLD4q16oddPseudo,
3131 ARM::VLD4q32oddPseudo };
3132 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3135 case Intrinsic::arm_neon_vld2lane: {
3136 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3137 ARM::VLD2LNd16Pseudo,
3138 ARM::VLD2LNd32Pseudo };
3139 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3140 ARM::VLD2LNq32Pseudo };
3141 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3144 case Intrinsic::arm_neon_vld3lane: {
3145 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3146 ARM::VLD3LNd16Pseudo,
3147 ARM::VLD3LNd32Pseudo };
3148 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3149 ARM::VLD3LNq32Pseudo };
3150 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3153 case Intrinsic::arm_neon_vld4lane: {
3154 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3155 ARM::VLD4LNd16Pseudo,
3156 ARM::VLD4LNd32Pseudo };
3157 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3158 ARM::VLD4LNq32Pseudo };
3159 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3162 case Intrinsic::arm_neon_vst1: {
3163 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3164 ARM::VST1d32, ARM::VST1d64 };
3165 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3166 ARM::VST1q32, ARM::VST1q64 };
3167 return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
3170 case Intrinsic::arm_neon_vst2: {
3171 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3172 ARM::VST2d32, ARM::VST1q64 };
3173 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
3174 ARM::VST2q32Pseudo };
3175 return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
3178 case Intrinsic::arm_neon_vst3: {
3179 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
3182 ARM::VST1d64TPseudo };
3183 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3184 ARM::VST3q16Pseudo_UPD,
3185 ARM::VST3q32Pseudo_UPD };
3186 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3187 ARM::VST3q16oddPseudo,
3188 ARM::VST3q32oddPseudo };
3189 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3192 case Intrinsic::arm_neon_vst4: {
3193 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
3196 ARM::VST1d64QPseudo };
3197 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3198 ARM::VST4q16Pseudo_UPD,
3199 ARM::VST4q32Pseudo_UPD };
3200 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3201 ARM::VST4q16oddPseudo,
3202 ARM::VST4q32oddPseudo };
3203 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3206 case Intrinsic::arm_neon_vst2lane: {
3207 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3208 ARM::VST2LNd16Pseudo,
3209 ARM::VST2LNd32Pseudo };
3210 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3211 ARM::VST2LNq32Pseudo };
3212 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3215 case Intrinsic::arm_neon_vst3lane: {
3216 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3217 ARM::VST3LNd16Pseudo,
3218 ARM::VST3LNd32Pseudo };
3219 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3220 ARM::VST3LNq32Pseudo };
3221 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3224 case Intrinsic::arm_neon_vst4lane: {
3225 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3226 ARM::VST4LNd16Pseudo,
3227 ARM::VST4LNd32Pseudo };
3228 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3229 ARM::VST4LNq32Pseudo };
3230 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
3236 case ISD::INTRINSIC_WO_CHAIN: {
3237 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3242 case Intrinsic::arm_neon_vtbl2:
3243 return SelectVTBL(N, false, 2, ARM::VTBL2);
3244 case Intrinsic::arm_neon_vtbl3:
3245 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3246 case Intrinsic::arm_neon_vtbl4:
3247 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3249 case Intrinsic::arm_neon_vtbx2:
3250 return SelectVTBL(N, true, 2, ARM::VTBX2);
3251 case Intrinsic::arm_neon_vtbx3:
3252 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3253 case Intrinsic::arm_neon_vtbx4:
3254 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
3259 case ARMISD::VTBL1: {
3261 EVT VT = N->getValueType(0);
3262 SmallVector<SDValue, 6> Ops;
3264 Ops.push_back(N->getOperand(0));
3265 Ops.push_back(N->getOperand(1));
3266 Ops.push_back(getAL(CurDAG)); // Predicate
3267 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3268 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
3270 case ARMISD::VTBL2: {
3272 EVT VT = N->getValueType(0);
3274 // Form a REG_SEQUENCE to force register allocation.
3275 SDValue V0 = N->getOperand(0);
3276 SDValue V1 = N->getOperand(1);
3277 SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
3279 SmallVector<SDValue, 6> Ops;
3280 Ops.push_back(RegSeq);
3281 Ops.push_back(N->getOperand(2));
3282 Ops.push_back(getAL(CurDAG)); // Predicate
3283 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3284 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
3287 case ISD::CONCAT_VECTORS:
3288 return SelectConcatVector(N);
3291 return SelectCode(N);
3294 SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
3295 std::vector<SDValue> AsmNodeOperands;
3296 unsigned Flag, Kind;
3297 bool Changed = false;
3298 unsigned NumOps = N->getNumOperands();
3300 // Normally, i64 data is bound to two arbitrary GPRs for the "%r" constraint.
3301 // However, some instructions (e.g. ldrexd/strexd in ARM mode) require
3302 // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
3303 // respectively. Since there is no constraint to explicitly specify a
3304 // reg pair, we use the GPRPair reg class for "%r" for 64-bit data. For Thumb,
3305 // the 64-bit data may be referred to by H, Q, R modifiers, so we still pack
3306 // them into a GPRPair.
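// For example (illustrative usage only, not taken from this file):
//   uint64_t V; void *P;
//   asm volatile("ldrexd %0, %H0, [%1]" : "=&r"(V) : "r"(P));
// requires %0/%H0 to name an even/odd register pair, which the GPRPair
// register class guarantees.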
3309 SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1) : SDValue(0,0);
3311 SmallVector<bool, 8> OpChanged;
3312 // Glue node will be appended late.
3313 for (unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
3314 SDValue op = N->getOperand(i);
3315 AsmNodeOperands.push_back(op);
3317 if (i < InlineAsm::Op_FirstOperand)
3320 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
3321 Flag = C->getZExtValue();
3322 Kind = InlineAsm::getKind(Flag);
3327 // Immediate operands to inline asm in the SelectionDAG are modeled with
3328 // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
3329 // the second is a constant with the value of the immediate. If we get here
3330 // and we have a Kind_Imm, skip the next operand, and continue.
3331 if (Kind == InlineAsm::Kind_Imm) {
3332 SDValue op = N->getOperand(++i);
3333 AsmNodeOperands.push_back(op);
3337 unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
3339 OpChanged.push_back(false);
3341 unsigned DefIdx = 0;
3342 bool IsTiedToChangedOp = false;
3343 // If it's a use that is tied with a previous def, it has no
3344 // reg class constraint.
3345 if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
3346 IsTiedToChangedOp = OpChanged[DefIdx];
3348 if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
3349 && Kind != InlineAsm::Kind_RegDefEarlyClobber)
3353 bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
3354 if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
3358 assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
3359 SDValue V0 = N->getOperand(i+1);
3360 SDValue V1 = N->getOperand(i+2);
3361 unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
3362 unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
3364 MachineRegisterInfo &MRI = MF->getRegInfo();
3366 if (Kind == InlineAsm::Kind_RegDef ||
3367 Kind == InlineAsm::Kind_RegDefEarlyClobber) {
3368 // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
3369 // the original GPRs.
3371 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3372 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3373 SDValue Chain = SDValue(N,0);
3375 SDNode *GU = N->getGluedUser();
3376 SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
3379 // Extract values from a GPRPair reg and copy to the original GPR reg.
3380 SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
3382 SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
3384 SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
3385 RegCopy.getValue(1));
3386 SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));
3388 // Update the original glue user.
3389 std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
3390 Ops.push_back(T1.getValue(1));
3391 CurDAG->UpdateNodeOperands(GU, &Ops[0], Ops.size());
3395 // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
3396 // GPRPair and then pass the GPRPair to the inline asm.
3397 SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];
3399 // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
3400 SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
3402 SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
3404 SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);
3406 // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
3407 // i32 VRs of inline asm with it.
3408 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3409 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3410 Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));
3412 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
3413 Glue = Chain.getValue(1);
3418 if (PairedReg.getNode()) {
3419 OpChanged[OpChanged.size() - 1] = true;
3420 Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/);
3421 if (IsTiedToChangedOp)
3422 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
3424 Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
3425 // Replace the current flag.
3426 AsmNodeOperands[AsmNodeOperands.size() -1] = CurDAG->getTargetConstant(
3428 // Add the new register node and skip the original two GPRs.
3429 AsmNodeOperands.push_back(PairedReg);
3430 // Skip the next two GPRs.
3436 AsmNodeOperands.push_back(Glue);
3440 SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
3441 CurDAG->getVTList(MVT::Other, MVT::Glue), &AsmNodeOperands[0],
3442 AsmNodeOperands.size());
3444 return New.getNode();
3448 bool ARMDAGToDAGISel::
3449 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
3450 std::vector<SDValue> &OutOps) {
3451 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
3452 // Require the address to be in a register. That is safe for all ARM
3453 // variants and it is hard to do anything much smarter without knowing
3454 // how the operand is used.
3455 OutOps.push_back(Op);
3459 /// createARMISelDag - This pass converts a legalized DAG into an
3460 /// ARM-specific DAG, ready for instruction scheduling.
3462 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3463 CodeGenOpt::Level OptLevel) {
3464 return new ARMDAGToDAGISel(TM, OptLevel);