//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};

class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;
  const ARMBaseInstrInfo *TII;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  SDNode *Select(SDNode *N);

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, A, B, C, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B) {
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, A, B, false);
  }

  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
//    return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr, SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
                             unsigned Scale);
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }
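
  // For reference: an ARM "so_imm" is an 8-bit value rotated right by an even
  // amount, so constants such as 0xFF, 0xFF0, and 0xFF000000 are encodable
  // while 0x101 is not. The *_not variants check whether the bitwise
  // complement is encodable instead (e.g. for MVN-based materialization).
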
  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics. NumVecs should be
  /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics. NumVecs should
  /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
  /// be 2, 3 or 4. The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          unsigned *DOpcodes, unsigned *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
  /// should be 2, 3 or 4. The opcode array specifies the instructions used
  /// for loading D registers. (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       unsigned *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
  /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  /// SelectCMOVOp - Select CMOV instructions for ARM.
  SDNode *SelectCMOVOp(SDNode *N);
  SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
                              SDValue InFlag);
  SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
                               SDValue InFlag);
  SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                            ARMCC::CondCodes CCVal, SDValue CCR,
                            SDValue InFlag);
  SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                             ARMCC::CondCodes CCVal, SDValue CCR,
                             SDValue InFlag);

  SDNode *SelectConcatVector(SDNode *N);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  // Form pairs of consecutive S, D, or Q registers.
  SDNode *PairSRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *QuadSRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where (N in [\arg RangeMin, \arg RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, unsigned Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}
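
// For example, isScaledConstantInRange(N, /*Scale=*/4, 0, 256, Val) accepts a
// constant 1020 (255 * 4) and sets Val to 255, but rejects 1021 (not a
// multiple of 4) and 1024 (pre-scaled value 256 is outside [0, 256)).
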
/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vorr (1 cycle)
    // vmla (stall 4 cycles)
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}

bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isCortexA9())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
}
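
// For example, on Cortex-A9 an address such as [r1, r2, lsl #2] is still
// worth folding even when the shift has other users, because a left shift by
// two is free in the shifter; other shifted offsets are only folded when the
// shift node has a single use.
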
bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}
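
// These two routines select the shifter operand of data-processing
// instructions: e.g. the "r2, lsl #3" in "add r0, r1, r2, lsl #3" (immediate
// shift form) or the "r2, lsl r3" in "add r0, r1, r2, lsl r3" (register
// shift form).
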
bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
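
// AddrModeImm12 is the [Rn, #imm12] form used by LDR/STR, e.g.
// "ldr r0, [r1, #42]". Constants outside [0, 4096) fall through to the
// base-only form at the end, and the addition is selected separately.
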
bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse())
    // Compute R +/- (R << N) and reuse it.
    return false;

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
          dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (!Subtarget->isCortexA9() ||
            (ShAmt == 2 &&
             isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}
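
// As an illustration of the MUL rewrite above: an address computed as X * 9
// is treated as X + (X << 3), so the load can be selected as
// "ldr r0, [rX, rX, lsl #3]" instead of needing a separate multiply.
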
AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return AM2_BASE;
    }
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
          dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (!Subtarget->isCortexA9() ||
            (ShAmt == 2 &&
             isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return AM2_SHOP;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                                  SDValue &Offset, SDValue &Opc) {
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
  return true;
}
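
// AddrMode3 covers the halfword and signed-byte loads/stores (LDRH, LDRSB,
// STRH, ...), which only encode an 8-bit immediate or a plain register
// offset, e.g. "ldrh r0, [r1, #-255]" or "ldrh r0, [r1, r2]" -- no shifted
// register form exists for these instructions.
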
bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
                              -256 + 1, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                       MVT::i32);
    return true;
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     MVT::i32);
  return true;
}
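
// AddrMode5 is the VFP load/store form: an 8-bit immediate scaled by 4, so
// e.g. "vldr d0, [r1, #1020]" is representable, but #1021 (not a multiple of
// 4) and #1024 (pre-scaled value 256, out of range) are not.
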
bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign > MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics. For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
  return true;
}
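
// For example, a 16-byte-aligned single-lane vld1 of a 64-bit quantity gets
// an alignment operand of 8: alignment beyond the memory size actually
// accessed carries no additional information for the instruction encoding.
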
bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;

  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                         Thumb Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset){
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
    return false;

  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
    return false;

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 1);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 2);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 4);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}
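
// This matches the SP-relative Thumb1 form (tLDRspi / tSTRspi): an unsigned
// 8-bit offset scaled by 4, e.g. "ldr r0, [sp, #1020]".
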
//===----------------------------------------------------------------------===//
//                        Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false;  // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R + (R << [1,2,3]) and reuse it.
    return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}
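
// The Thumb2 "so_reg" form only allows LSL by 0-3, e.g.
// "ldr.w r0, [r1, r2, lsl #2]"; larger shift amounts cannot be encoded and
// are left as separate shift instructions.
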
//===--------------------------------------------------------------------===//

/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}

SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 && !isPre &&
      SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 &&
      SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
    Match = true;

  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (isPre &&
          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
          SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
      }
    }
  }

  if (Match) {
    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
                                    MVT::i32, MVT::Other, Ops, 5);
    } else {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
                                    MVT::i32, MVT::Other, Ops, 6);
    }
  }

  return NULL;
}
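
// Pre-indexed and post-indexed loads update the base register as part of the
// access: "ldr r0, [r1, #4]!" uses r1+4 as the address and writes it back to
// r1, while "ldr r0, [r1], #4" uses r1 as the address and then increments it.
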
SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 5);
  }

  return NULL;
}

/// PairSRegs - Form a D register from a pair of S registers.
///
SDNode *ARMDAGToDAGISel::PairSRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// PairDRegs - Form a quad register from a pair of D registers.
///
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
///
SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// QuadSRegs - Form 4 consecutive S registers.
///
SDNode *ARMDAGToDAGISel::QuadSRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// QuadDRegs - Form 4 consecutive D registers.
///
SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// QuadQRegs - Form 4 consecutive Q registers.
///
SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}
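
// Each helper above emits a single REG_SEQUENCE node; the register allocator
// must then assign the inputs to adjacent registers, which is what the NEON
// multi-register load/store and table-lookup instructions require.
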
/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction. The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}
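
// For example, a 4-register VLD/VST can carry an alignment operand of up to
// 32 bytes, a 2-register one up to 16, and anything below 8 bytes is encoded
// as 0 (no alignment guarantee).
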
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDNode *VLd;
  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());

  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs. This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA, 7);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                 Ops.data(), Ops.size());
  }

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  if (NumVecs == 1)
    return VLd;

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vst type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VST1");
    break;
  }

  std::vector<EVT> ResTys;
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SmallVector<SDValue, 7> Ops;

  // Double registers and VST1/VST2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    SDValue SrcReg;
    if (NumVecs == 1) {
      SrcReg = N->getOperand(Vec0Idx);
    } else if (is64BitVector) {
      // Form a REG_SEQUENCE to force register allocation.
      SDValue V0 = N->getOperand(Vec0Idx + 0);
      SDValue V1 = N->getOperand(Vec0Idx + 1);
      if (NumVecs == 2)
        SrcReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
      else {
        SDValue V2 = N->getOperand(Vec0Idx + 2);
        // If it's a vst3, form a quad D-register and leave the last part as
        // an undef.
        SDValue V3 = (NumVecs == 3)
          ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
          : N->getOperand(Vec0Idx + 3);
        SrcReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
      }
    } else {
      // Form a QQ register.
      SDValue Q0 = N->getOperand(Vec0Idx);
      SDValue Q1 = N->getOperand(Vec0Idx + 1);
      SrcReg = SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0);
    }

    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(SrcReg);
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    SDNode *VSt =
      CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());

    // Transfer memoperands.
    cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);

    return VSt;
  }

  // Otherwise, quad registers are stored with two separate instructions,
  // where one stores the even registers and the other stores the odd registers.

  // Form the QQQQ REG_SEQUENCE.
  SDValue V0 = N->getOperand(Vec0Idx + 0);
  SDValue V1 = N->getOperand(Vec0Idx + 1);
  SDValue V2 = N->getOperand(Vec0Idx + 2);
  SDValue V3 = (NumVecs == 3)
    ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
    : N->getOperand(Vec0Idx + 3);
  SDValue RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);

  // Store the even D registers. This is always an updating store, so that it
  // provides the address to the second store for the odd subregs.
  const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
  SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                        MemAddr.getValueType(),
                                        MVT::Other, OpsA, 7);
  cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
  Chain = SDValue(VStA, 1);

  // Store the odd D registers.
  Ops.push_back(SDValue(VStA, 0));
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
    assert(isa<ConstantSDNode>(Inc.getNode()) &&
           "only constant post-increment update allowed for VST3/4");
    (void)Inc;
    Ops.push_back(Reg0);
  }
  Ops.push_back(RegSeq);
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);
  SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                        Ops.data(), Ops.size());
  cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
  return VStB;
}

SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
                                         bool isUpdating, unsigned NumVecs,
                                         unsigned *DOpcodes,
                                         unsigned *QOpcodes) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();

  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld/vst lane type");
  // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  // Quad-register operations:
  case MVT::v8i16: OpcodeIndex = 0; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 1; break;
  }

  std::vector<EVT> ResTys;
  if (IsLoad) {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
                                      MVT::i64, ResTyElts));
  }
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
    Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
  }

  SDValue SuperReg;
  SDValue V0 = N->getOperand(Vec0Idx + 0);
  SDValue V1 = N->getOperand(Vec0Idx + 1);
  if (NumVecs == 2) {
    if (is64BitVector)
      SuperReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
    else
      SuperReg = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
  } else {
    SDValue V2 = N->getOperand(Vec0Idx + 2);
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
      : N->getOperand(Vec0Idx + 3);
    if (is64BitVector)
      SuperReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
    else
      SuperReg = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
  }
  Ops.push_back(SuperReg);
  Ops.push_back(getI32Imm(Lane));
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                                  QOpcodes[OpcodeIndex]);
  SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys,
                                         Ops.data(), Ops.size());
  cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
  if (!IsLoad)
    return VLdLn;

  // Extract the subregisters.
  SuperReg = SDValue(VLdLn, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
  return NULL;
}

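/// SelectVLDDup - Select NEON load-duplicate intrinsics, which load a value
/// and splat it into all lanes of each of NumVecs (2, 3 or 4) D registers.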
SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
                                      unsigned NumVecs, unsigned *Opcodes) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
    return NULL;

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);

  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld-dup type");
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDValue SuperReg;
  unsigned Opc = Opcodes[OpcodeIndex];
  SmallVector<SDValue, 6> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(2);
    Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
  }
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
  std::vector<EVT> ResTys;
  ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts));
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);
  SDNode *VLdDup =
    CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
  cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
  SuperReg = SDValue(VLdDup, 0);

  // Extract the subregisters.
  assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
  unsigned SubIdx = ARM::dsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
  return NULL;
}

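/// SelectVTBL - Select NEON VTBL and VTBX intrinsics.  NumVecs should be 2,
/// 3 or 4.  A REG_SEQUENCE is formed so that the table registers end up in
/// consecutive D registers, as the instructions require.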
SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
                                    unsigned Opc) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);
  unsigned FirstTblReg = IsExt ? 2 : 1;

  // Form a REG_SEQUENCE to force register allocation.
  SDValue RegSeq;
  SDValue V0 = N->getOperand(FirstTblReg + 0);
  SDValue V1 = N->getOperand(FirstTblReg + 1);
  if (NumVecs == 2)
    RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
  else {
    SDValue V2 = N->getOperand(FirstTblReg + 2);
    // If it's a vtbl3, form a quad D-register and leave the last part as
    // an undef.
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
      : N->getOperand(FirstTblReg + 3);
    RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
  }

  SmallVector<SDValue, 6> Ops;
  if (IsExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
  Ops.push_back(getAL(CurDAG)); // predicate
  Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
  return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size());
}

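/// SelectV6T2BitfieldExtractOp - Try to select a bitfield extract (SBFX or
/// UBFX, or their Thumb-2 equivalents) for an AND-of-shift or
/// shift-of-shift pattern.  Returns NULL if the pattern does not match.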
SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
                                                     bool isSigned) {
  if (!Subtarget->hasV6T2Ops())
    return NULL;

  unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
    : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);

  // For unsigned extracts, check for a shift right and mask
  unsigned And_imm = 0;
  if (N->getOpcode() == ISD::AND) {
    if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {

      // The immediate is a mask of the low bits iff imm & (imm+1) == 0
      if (And_imm & (And_imm + 1))
        return NULL;

      unsigned Srl_imm = 0;
      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
                                Srl_imm)) {
        assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");

        // Note: The width operand is encoded as width-1.
        unsigned Width = CountTrailingOnes_32(And_imm) - 1;
        unsigned LSB = Srl_imm;
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        SDValue Ops[] = { N->getOperand(0).getOperand(0),
                          CurDAG->getTargetConstant(LSB, MVT::i32),
                          CurDAG->getTargetConstant(Width, MVT::i32),
                          getAL(CurDAG), Reg0 };
        return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
      }
    }
    return NULL;
  }

  // Otherwise, we're looking for a shift of a shift
  unsigned Shl_imm = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
    unsigned Srl_imm = 0;
    if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
      // Note: The width operand is encoded as width-1.
      unsigned Width = 32 - Srl_imm - 1;
      int LSB = Srl_imm - Shl_imm;
      if (LSB < 0)
        return NULL;
      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
      SDValue Ops[] = { N->getOperand(0).getOperand(0),
                        CurDAG->getTargetConstant(LSB, MVT::i32),
                        CurDAG->getTargetConstant(Width, MVT::i32),
                        getAL(CurDAG), Reg0 };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  return NULL;
}

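/// SelectT2CMOVShiftOp - Select a Thumb-2 conditional move whose true
/// operand is a shifter operand, folding the shift into the t2MOVCC
/// instruction.  Returns 0 if the operand is not a shifter operand.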
SDNode *ARMDAGToDAGISel::
SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                    ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
    unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
    unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
    unsigned Opc = 0;
    switch (SOShOp) {
    case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
    case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
    case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
    case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
    default:
      llvm_unreachable("Unknown so_reg opcode!");
      break;
    }
    SDValue SOShImm =
      CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
  }
  return 0;
}

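/// SelectARMCMOVShiftOp - ARM-mode counterpart of SelectT2CMOVShiftOp:
/// fold an immediate- or register-shifted true operand into MOVCCsi or
/// MOVCCsr.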
SDNode *ARMDAGToDAGISel::
SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                     ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  SDValue CPTmp2;
  if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, ARM::MOVCCsi, MVT::i32, Ops, 6);
  }

  if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, ARM::MOVCCsr, MVT::i32, Ops, 7);
  }
  return 0;
}

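/// SelectT2CMOVImmOp - Select a Thumb-2 conditional move of an immediate,
/// trying the cheaper encodings (t2MOVCCi, t2MOVCCi16, t2MVNCCi) before
/// falling back to the two-instruction t2MOVCCi32imm form.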
SDNode *ARMDAGToDAGISel::
SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                  ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  unsigned Opc = 0;
  unsigned TrueImm = T->getZExtValue();
  if (is_t2_so_imm(TrueImm)) {
    Opc = ARM::t2MOVCCi;
  } else if (TrueImm <= 0xffff) {
    Opc = ARM::t2MOVCCi16;
  } else if (is_t2_so_imm_not(TrueImm)) {
    TrueImm = ~TrueImm;
    Opc = ARM::t2MVNCCi;
  } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
    // Large immediate.
    Opc = ARM::t2MOVCCi32imm;
  }

  if (Opc) {
    SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
  }
  return 0;
}

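/// SelectARMCMOVImmOp - ARM-mode counterpart of SelectT2CMOVImmOp, using
/// MOVCCi, MOVCCi16, MVNCCi or MOVCCi32imm.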
SDNode *ARMDAGToDAGISel::
SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                   ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  unsigned Opc = 0;
  unsigned TrueImm = T->getZExtValue();
  bool isSoImm = is_so_imm(TrueImm);
  if (isSoImm) {
    Opc = ARM::MOVCCi;
  } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
    Opc = ARM::MOVCCi16;
  } else if (is_so_imm_not(TrueImm)) {
    TrueImm = ~TrueImm;
    Opc = ARM::MVNCCi;
  } else if (TrueVal.getNode()->hasOneUse() &&
             (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
    // Large immediate.
    Opc = ARM::MOVCCi32imm;
  }

  if (Opc) {
    SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
  }
  return 0;
}

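/// SelectCMOVOp - Select an ARMISD::CMOV node.  The shifter-operand and
/// immediate forms are tried first, with the condition inverted when that
/// allows the constant to appear as the true operand; otherwise a plain
/// register-to-register conditional move is emitted.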
SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue CC = N->getOperand(2);
  SDValue CCR = N->getOperand(3);
  SDValue InFlag = N->getOperand(4);
  assert(CC.getOpcode() == ISD::Constant);
  assert(CCR.getOpcode() == ISD::Register);
  ARMCC::CondCodes CCVal =
    (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();

  if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
    // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
    // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
    // Pattern complexity = 18  cost = 1  size = 0
    if (Subtarget->isThumb()) {
      SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
                                        CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
                                         CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }

    // Pattern: (ARMcmov:i32 GPR:i32:$false,
    //             (imm:i32)<<P:Pred_so_imm>>:$true,
    //             (imm:i32):$cc)
    // Emits: (MOVCCi:i32 GPR:i32:$false,
    //           (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
    // Pattern complexity = 10  cost = 1  size = 0
    if (Subtarget->isThumb()) {
      SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
                                      CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
                                       CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }
  }

  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Pattern complexity = 6  cost = 1  size = 0
  //
  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Pattern complexity = 6  cost = 11  size = 0
  //
  // Also VMOVScc and VMOVDcc.
  SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
  SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
  unsigned Opc = 0;
  switch (VT.getSimpleVT().SimpleTy) {
  default: assert(false && "Illegal conditional move type!");
    break;
  case MVT::i32:
    Opc = Subtarget->isThumb()
      ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
      : ARM::MOVCCr;
    break;
  case MVT::f32:
    Opc = ARM::VMOVScc;
    break;
  case MVT::f64:
    Opc = ARM::VMOVDcc;
    break;
  }
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
}

SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  EVT VT = N->getValueType(0);
  if (!VT.is128BitVector() || N->getNumOperands() != 2)
    llvm_unreachable("unexpected CONCAT_VECTORS");
  return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
}

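/// Select - Main instruction selection hook for the ARM target.  Nodes not
/// handled by one of the explicit cases below are passed to the
/// auto-generated matcher in SelectCode.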
SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
  DebugLoc dl = N->getDebugLoc();

  if (N->isMachineOpcode())
    return NULL;   // Already selected.

  switch (N->getOpcode()) {
  default: break;
  case ISD::Constant: {
    unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
    bool UseCP = true;
    if (Subtarget->hasThumb2())
      // Thumb2-aware targets have the MOVT instruction, so all immediates can
      // be done with MOV + MOVT, at worst.
      UseCP = false;
    else {
      if (Subtarget->isThumb()) {
        UseCP = (Val > 255 &&                          // MOV
                 ~Val > 255 &&                         // MOV + MVN
                 !ARM_AM::isThumbImmShiftedVal(Val));  // MOV + LSL
      } else
        UseCP = (ARM_AM::getSOImmVal(Val) == -1 &&     // MOV
                 ARM_AM::getSOImmVal(~Val) == -1 &&    // MVN
                 !ARM_AM::isSOImmTwoPartVal(Val));     // two instrs.
    }

    if (UseCP) {
      SDValue CPIdx =
        CurDAG->getTargetConstantPool(ConstantInt::get(
                                  Type::getInt32Ty(*CurDAG->getContext()), Val),
                                      TLI.getPointerTy());

      SDNode *ResNode;
      if (Subtarget->isThumb1Only()) {
        SDValue Pred = getAL(CurDAG);
        SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
        SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
        ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
                                         Ops, 4);
      } else {
        SDValue Ops[] = {
          CPIdx,
          CurDAG->getTargetConstant(0, MVT::i32),
          getAL(CurDAG),
          CurDAG->getRegister(0, MVT::i32),
          CurDAG->getEntryNode()
        };
        ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
                                         Ops, 5);
      }
      ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
      return NULL;
    }

    // Other cases are autogenerated.
    break;
  }
  case ISD::FrameIndex: {
    // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    if (Subtarget->isThumb1Only()) {
      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
    } else {
      unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
                      ARM::t2ADDri : ARM::ADDri);
      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  case ISD::SRL:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;
    break;
  case ISD::SRA:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
      return I;
    break;
  case ISD::MUL:
    if (Subtarget->isThumb1Only())
      break;
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      unsigned RHSV = C->getZExtValue();
      if (!RHSV) break;
      if (isPowerOf2_32(RHSV-1)) {  // 2^n+1?
        unsigned ShImm = Log2_32(RHSV-1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
        }
      }
      if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
        unsigned ShImm = Log2_32(RHSV+1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
        }
      }
    }
    break;
  case ISD::AND: {
    // Check for unsigned bitfield extract
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;

    // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
    // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits
    // are entirely contributed by c2 and lower 16-bits are entirely contributed
    // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
    // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)"
    EVT VT = N->getValueType(0);
    if (VT != MVT::i32)
      break;
    unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
      ? ARM::t2MOVTi16
      : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
    if (!Opc)
      break;
    SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    if (!N1C)
      break;
    if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
      SDValue N2 = N0.getOperand(1);
      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
      if (!N2C)
        break;
      unsigned N1CVal = N1C->getZExtValue();
      unsigned N2CVal = N2C->getZExtValue();
      if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
          (N1CVal & 0xffffU) == 0xffffU &&
          (N2CVal & 0xffffU) == 0x0U) {
        SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
                                                  MVT::i32);
        SDValue Ops[] = { N0.getOperand(0), Imm16,
                          getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
        return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4);
      }
    }
    break;
  }
  case ARMISD::VMOVRRD:
    return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
                                  N->getOperand(0), getAL(CurDAG),
                                  CurDAG->getRegister(0, MVT::i32));
  case ISD::UMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32,
                                    Ops, 4);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
                                      ARM::UMULL : ARM::UMULLv5,
                                    dl, MVT::i32, MVT::i32, Ops, 5);
    }
  }
  case ISD::SMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32,
                                    Ops, 4);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
                                      ARM::SMULL : ARM::SMULLv5,
                                    dl, MVT::i32, MVT::i32, Ops, 5);
    }
  }
  case ISD::LOAD: {
    SDNode *ResNode = 0;
    if (Subtarget->isThumb() && Subtarget->hasThumb2())
      ResNode = SelectT2IndexedLoad(N);
    else
      ResNode = SelectARMIndexedLoad(N);
    if (ResNode)
      return ResNode;
    // Other cases are autogenerated.
    break;
  }
  case ARMISD::BRCOND: {
    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0
    //
    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0
    //
    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0

    unsigned Opc = Subtarget->isThumb() ?
      ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
    SDValue Chain = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    SDValue InFlag = N->getOperand(4);
    assert(N1.getOpcode() == ISD::BasicBlock);
    assert(N2.getOpcode() == ISD::Constant);
    assert(N3.getOpcode() == ISD::Register);

    SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                               cast<ConstantSDNode>(N2)->getZExtValue()),
                               MVT::i32);
    SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
    SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
                                             MVT::Glue, Ops, 5);
    Chain = SDValue(ResNode, 0);
    if (N->getNumValues() == 2) {
      InFlag = SDValue(ResNode, 1);
      ReplaceUses(SDValue(N, 1), InFlag);
    }
    ReplaceUses(SDValue(N, 0),
                SDValue(Chain.getNode(), Chain.getResNo()));
    return NULL;
  }
  case ARMISD::CMOV:
    return SelectCMOVOp(N);
  case ARMISD::VZIP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VZIPd8; break;
    case MVT::v4i16: Opc = ARM::VZIPd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VZIPd32; break;
    case MVT::v16i8: Opc = ARM::VZIPq8; break;
    case MVT::v8i16: Opc = ARM::VZIPq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VZIPq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::VUZP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VUZPd8; break;
    case MVT::v4i16: Opc = ARM::VUZPd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VUZPd32; break;
    case MVT::v16i8: Opc = ARM::VUZPq8; break;
    case MVT::v8i16: Opc = ARM::VUZPq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VUZPq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::VTRN: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VTRNd8; break;
    case MVT::v4i16: Opc = ARM::VTRNd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VTRNd32; break;
    case MVT::v16i8: Opc = ARM::VTRNq8; break;
    case MVT::v8i16: Opc = ARM::VTRNq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VTRNq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::BUILD_VECTOR: {
    EVT VecVT = N->getValueType(0);
    EVT EltVT = VecVT.getVectorElementType();
    unsigned NumElts = VecVT.getVectorNumElements();
    if (EltVT == MVT::f64) {
      assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
      return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
    }
    assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
    if (NumElts == 2)
      return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
    assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
    return QuadSRegs(VecVT, N->getOperand(0), N->getOperand(1),
                     N->getOperand(2), N->getOperand(3));
  }
  case ARMISD::VLD2DUP: {
    unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo, ARM::VLD2DUPd16Pseudo,
                           ARM::VLD2DUPd32Pseudo };
    return SelectVLDDup(N, false, 2, Opcodes);
  }

  case ARMISD::VLD3DUP: {
    unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd16Pseudo,
                           ARM::VLD3DUPd32Pseudo };
    return SelectVLDDup(N, false, 3, Opcodes);
  }

  case ARMISD::VLD4DUP: {
    unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd16Pseudo,
                           ARM::VLD4DUPd32Pseudo };
    return SelectVLDDup(N, false, 4, Opcodes);
  }

  case ARMISD::VLD2DUP_UPD: {
    unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo_UPD, ARM::VLD2DUPd16Pseudo_UPD,
                           ARM::VLD2DUPd32Pseudo_UPD };
    return SelectVLDDup(N, true, 2, Opcodes);
  }

  case ARMISD::VLD3DUP_UPD: {
    unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd16Pseudo_UPD,
                           ARM::VLD3DUPd32Pseudo_UPD };
    return SelectVLDDup(N, true, 3, Opcodes);
  }

  case ARMISD::VLD4DUP_UPD: {
    unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd16Pseudo_UPD,
                           ARM::VLD4DUPd32Pseudo_UPD };
    return SelectVLDDup(N, true, 4, Opcodes);
  }
  case ARMISD::VLD1_UPD: {
    unsigned DOpcodes[] = { ARM::VLD1d8_UPD, ARM::VLD1d16_UPD,
                            ARM::VLD1d32_UPD, ARM::VLD1d64_UPD };
    unsigned QOpcodes[] = { ARM::VLD1q8Pseudo_UPD, ARM::VLD1q16Pseudo_UPD,
                            ARM::VLD1q32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD };
    return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
  }

  case ARMISD::VLD2_UPD: {
    unsigned DOpcodes[] = { ARM::VLD2d8Pseudo_UPD, ARM::VLD2d16Pseudo_UPD,
                            ARM::VLD2d32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD };
    unsigned QOpcodes[] = { ARM::VLD2q8Pseudo_UPD, ARM::VLD2q16Pseudo_UPD,
                            ARM::VLD2q32Pseudo_UPD };
    return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
  }

  case ARMISD::VLD3_UPD: {
    unsigned DOpcodes[] = { ARM::VLD3d8Pseudo_UPD, ARM::VLD3d16Pseudo_UPD,
                            ARM::VLD3d32Pseudo_UPD, ARM::VLD1d64TPseudo_UPD };
    unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
                             ARM::VLD3q16Pseudo_UPD,
                             ARM::VLD3q32Pseudo_UPD };
    unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
                             ARM::VLD3q16oddPseudo_UPD,
                             ARM::VLD3q32oddPseudo_UPD };
    return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
  }

  case ARMISD::VLD4_UPD: {
    unsigned DOpcodes[] = { ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD,
                            ARM::VLD4d32Pseudo_UPD, ARM::VLD1d64QPseudo_UPD };
    unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
                             ARM::VLD4q16Pseudo_UPD,
                             ARM::VLD4q32Pseudo_UPD };
    unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
                             ARM::VLD4q16oddPseudo_UPD,
                             ARM::VLD4q32oddPseudo_UPD };
    return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
  }

  case ARMISD::VLD2LN_UPD: {
    unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd16Pseudo_UPD,
                            ARM::VLD2LNd32Pseudo_UPD };
    unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
                            ARM::VLD2LNq32Pseudo_UPD };
    return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
  }

  case ARMISD::VLD3LN_UPD: {
    unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd16Pseudo_UPD,
                            ARM::VLD3LNd32Pseudo_UPD };
    unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
                            ARM::VLD3LNq32Pseudo_UPD };
    return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
  }

  case ARMISD::VLD4LN_UPD: {
    unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd16Pseudo_UPD,
                            ARM::VLD4LNd32Pseudo_UPD };
    unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
                            ARM::VLD4LNq32Pseudo_UPD };
    return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
  }
  case ARMISD::VST1_UPD: {
    unsigned DOpcodes[] = { ARM::VST1d8_UPD, ARM::VST1d16_UPD,
                            ARM::VST1d32_UPD, ARM::VST1d64_UPD };
    unsigned QOpcodes[] = { ARM::VST1q8Pseudo_UPD, ARM::VST1q16Pseudo_UPD,
                            ARM::VST1q32Pseudo_UPD, ARM::VST1q64Pseudo_UPD };
    return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
  }

  case ARMISD::VST2_UPD: {
    unsigned DOpcodes[] = { ARM::VST2d8Pseudo_UPD, ARM::VST2d16Pseudo_UPD,
                            ARM::VST2d32Pseudo_UPD, ARM::VST1q64Pseudo_UPD };
    unsigned QOpcodes[] = { ARM::VST2q8Pseudo_UPD, ARM::VST2q16Pseudo_UPD,
                            ARM::VST2q32Pseudo_UPD };
    return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
  }

  case ARMISD::VST3_UPD: {
    unsigned DOpcodes[] = { ARM::VST3d8Pseudo_UPD, ARM::VST3d16Pseudo_UPD,
                            ARM::VST3d32Pseudo_UPD, ARM::VST1d64TPseudo_UPD };
    unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
                             ARM::VST3q16Pseudo_UPD,
                             ARM::VST3q32Pseudo_UPD };
    unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
                             ARM::VST3q16oddPseudo_UPD,
                             ARM::VST3q32oddPseudo_UPD };
    return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
  }

  case ARMISD::VST4_UPD: {
    unsigned DOpcodes[] = { ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD,
                            ARM::VST4d32Pseudo_UPD, ARM::VST1d64QPseudo_UPD };
    unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
                             ARM::VST4q16Pseudo_UPD,
                             ARM::VST4q32Pseudo_UPD };
    unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
                             ARM::VST4q16oddPseudo_UPD,
                             ARM::VST4q32oddPseudo_UPD };
    return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
  }

  case ARMISD::VST2LN_UPD: {
    unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd16Pseudo_UPD,
                            ARM::VST2LNd32Pseudo_UPD };
    unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
                            ARM::VST2LNq32Pseudo_UPD };
    return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
  }

  case ARMISD::VST3LN_UPD: {
    unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd16Pseudo_UPD,
                            ARM::VST3LNd32Pseudo_UPD };
    unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
                            ARM::VST3LNq32Pseudo_UPD };
    return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
  }

  case ARMISD::VST4LN_UPD: {
    unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd16Pseudo_UPD,
                            ARM::VST4LNd32Pseudo_UPD };
    unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
                            ARM::VST4LNq32Pseudo_UPD };
    return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
  }
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;

    case Intrinsic::arm_ldrexd: {
      SDValue MemAddr = N->getOperand(2);
      DebugLoc dl = N->getDebugLoc();
      SDValue Chain = N->getOperand(0);

      unsigned NewOpc = ARM::LDREXD;
      if (Subtarget->isThumb() && Subtarget->hasThumb2())
        NewOpc = ARM::t2LDREXD;

      // arm_ldrexd returns a i64 value in {i32, i32}
      std::vector<EVT> ResTys;
      ResTys.push_back(MVT::i32);
      ResTys.push_back(MVT::i32);
      ResTys.push_back(MVT::Other);

      // place arguments in the right order
      SmallVector<SDValue, 7> Ops;
      Ops.push_back(MemAddr);
      Ops.push_back(getAL(CurDAG));
      Ops.push_back(CurDAG->getRegister(0, MVT::i32));
      Ops.push_back(Chain);
      SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
                                          Ops.size());
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);

      // Until there's support for specifying explicit register constraints
      // like the use of even/odd register pair, hardcode ldrexd to always
      // use the pair [R0, R1] to hold the load result.
      Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R0,
                                   SDValue(Ld, 0), SDValue(0, 0));
      Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R1,
                                   SDValue(Ld, 1), Chain.getValue(1));

      // Remap uses.
      SDValue Glue = Chain.getValue(1);
      if (!SDValue(N, 0).use_empty()) {
        SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                                ARM::R0, MVT::i32, Glue);
        Glue = Result.getValue(2);
        ReplaceUses(SDValue(N, 0), Result);
      }
      if (!SDValue(N, 1).use_empty()) {
        SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                                ARM::R1, MVT::i32, Glue);
        Glue = Result.getValue(2);
        ReplaceUses(SDValue(N, 1), Result);
      }

      ReplaceUses(SDValue(N, 2), SDValue(Ld, 2));
      return NULL;
    }

    case Intrinsic::arm_strexd: {
      DebugLoc dl = N->getDebugLoc();
      SDValue Chain = N->getOperand(0);
      SDValue Val0 = N->getOperand(2);
      SDValue Val1 = N->getOperand(3);
      SDValue MemAddr = N->getOperand(4);

      // Until there's support for specifying explicit register constraints
      // like the use of even/odd register pair, hardcode strexd to always
      // use the pair [R2, R3] to hold the i64 (i32, i32) value to be stored.
      Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R2, Val0,
                                   SDValue(0, 0));
      Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R3, Val1, Chain.getValue(1));

      SDValue Glue = Chain.getValue(1);
      Val0 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                    ARM::R2, MVT::i32, Glue);
      Glue = Val0.getValue(1);
      Val1 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                    ARM::R3, MVT::i32, Glue);

      // Store exclusive double returns an i32 value which is the return
      // status of the issued store.
      std::vector<EVT> ResTys;
      ResTys.push_back(MVT::i32);
      ResTys.push_back(MVT::Other);

      // place arguments in the right order
      SmallVector<SDValue, 7> Ops;
      Ops.push_back(Val0);
      Ops.push_back(Val1);
      Ops.push_back(MemAddr);
      Ops.push_back(getAL(CurDAG));
      Ops.push_back(CurDAG->getRegister(0, MVT::i32));
      Ops.push_back(Chain);

      unsigned NewOpc = ARM::STREXD;
      if (Subtarget->isThumb() && Subtarget->hasThumb2())
        NewOpc = ARM::t2STREXD;

      SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
                                          Ops.size());
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      return St;
    }

    case Intrinsic::arm_neon_vld1: {
      unsigned DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
                              ARM::VLD1d32, ARM::VLD1d64 };
      unsigned QOpcodes[] = { ARM::VLD1q8Pseudo, ARM::VLD1q16Pseudo,
                              ARM::VLD1q32Pseudo, ARM::VLD1q64Pseudo };
      return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vld2: {
      unsigned DOpcodes[] = { ARM::VLD2d8Pseudo, ARM::VLD2d16Pseudo,
                              ARM::VLD2d32Pseudo, ARM::VLD1q64Pseudo };
      unsigned QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
                              ARM::VLD2q32Pseudo };
      return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vld3: {
      unsigned DOpcodes[] = { ARM::VLD3d8Pseudo, ARM::VLD3d16Pseudo,
                              ARM::VLD3d32Pseudo, ARM::VLD1d64TPseudo };
      unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
                               ARM::VLD3q16Pseudo_UPD,
                               ARM::VLD3q32Pseudo_UPD };
      unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo,
                               ARM::VLD3q16oddPseudo,
                               ARM::VLD3q32oddPseudo };
      return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vld4: {
      unsigned DOpcodes[] = { ARM::VLD4d8Pseudo, ARM::VLD4d16Pseudo,
                              ARM::VLD4d32Pseudo, ARM::VLD1d64QPseudo };
      unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
                               ARM::VLD4q16Pseudo_UPD,
                               ARM::VLD4q32Pseudo_UPD };
      unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo,
                               ARM::VLD4q16oddPseudo,
                               ARM::VLD4q32oddPseudo };
      return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vld2lane: {
      unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo, ARM::VLD2LNd16Pseudo,
                              ARM::VLD2LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo, ARM::VLD2LNq32Pseudo };
      return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vld3lane: {
      unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo, ARM::VLD3LNd16Pseudo,
                              ARM::VLD3LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo, ARM::VLD3LNq32Pseudo };
      return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vld4lane: {
      unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo, ARM::VLD4LNd16Pseudo,
                              ARM::VLD4LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo, ARM::VLD4LNq32Pseudo };
      return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vst1: {
      unsigned DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
                              ARM::VST1d32, ARM::VST1d64 };
      unsigned QOpcodes[] = { ARM::VST1q8Pseudo, ARM::VST1q16Pseudo,
                              ARM::VST1q32Pseudo, ARM::VST1q64Pseudo };
      return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vst2: {
      unsigned DOpcodes[] = { ARM::VST2d8Pseudo, ARM::VST2d16Pseudo,
                              ARM::VST2d32Pseudo, ARM::VST1q64Pseudo };
      unsigned QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
                              ARM::VST2q32Pseudo };
      return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vst3: {
      unsigned DOpcodes[] = { ARM::VST3d8Pseudo, ARM::VST3d16Pseudo,
                              ARM::VST3d32Pseudo, ARM::VST1d64TPseudo };
      unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
                               ARM::VST3q16Pseudo_UPD,
                               ARM::VST3q32Pseudo_UPD };
      unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo,
                               ARM::VST3q16oddPseudo,
                               ARM::VST3q32oddPseudo };
      return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst4: {
      unsigned DOpcodes[] = { ARM::VST4d8Pseudo, ARM::VST4d16Pseudo,
                              ARM::VST4d32Pseudo, ARM::VST1d64QPseudo };
      unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
                               ARM::VST4q16Pseudo_UPD,
                               ARM::VST4q32Pseudo_UPD };
      unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo,
                               ARM::VST4q16oddPseudo,
                               ARM::VST4q32oddPseudo };
      return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst2lane: {
      unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo, ARM::VST2LNd16Pseudo,
                              ARM::VST2LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo, ARM::VST2LNq32Pseudo };
      return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vst3lane: {
      unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo, ARM::VST3LNd16Pseudo,
                              ARM::VST3LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo, ARM::VST3LNq32Pseudo };
      return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vst4lane: {
      unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo, ARM::VST4LNd16Pseudo,
                              ARM::VST4LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo, ARM::VST4LNq32Pseudo };
      return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
    }
    }
    break;
  }

  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;

    case Intrinsic::arm_neon_vtbl2:
      return SelectVTBL(N, false, 2, ARM::VTBL2Pseudo);
    case Intrinsic::arm_neon_vtbl3:
      return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
    case Intrinsic::arm_neon_vtbl4:
      return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);

    case Intrinsic::arm_neon_vtbx2:
      return SelectVTBL(N, true, 2, ARM::VTBX2Pseudo);
    case Intrinsic::arm_neon_vtbx3:
      return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
    case Intrinsic::arm_neon_vtbx4:
      return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
    }
    break;
  }

  case ARMISD::VTBL1: {
    DebugLoc dl = N->getDebugLoc();
    EVT VT = N->getValueType(0);
    SmallVector<SDValue, 6> Ops;

    Ops.push_back(N->getOperand(0));
    Ops.push_back(N->getOperand(1));
    Ops.push_back(getAL(CurDAG));                    // Predicate
    Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
    return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops.data(), Ops.size());
  }
  case ARMISD::VTBL2: {
    DebugLoc dl = N->getDebugLoc();
    EVT VT = N->getValueType(0);

    // Form a REG_SEQUENCE to force register allocation.
    SDValue V0 = N->getOperand(0);
    SDValue V1 = N->getOperand(1);
    SDValue RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);

    SmallVector<SDValue, 6> Ops;
    Ops.push_back(RegSeq);
    Ops.push_back(N->getOperand(2));
    Ops.push_back(getAL(CurDAG));                    // Predicate
    Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
    return CurDAG->getMachineNode(ARM::VTBL2Pseudo, dl, VT,
                                  Ops.data(), Ops.size());
  }

  case ISD::CONCAT_VECTORS:
    return SelectConcatVector(N);
  }

  return SelectCode(N);
}

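/// SelectInlineAsmMemoryOperand - Select the address for an inline-asm
/// memory ('m') constraint operand.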
bool ARMDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register. That is safe for all ARM
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}

/// createARMISelDag - This pass converts a legalized DAG into a
/// ARM-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new ARMDAGToDAGISel(TM, OptLevel);
}