//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-isel"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));
//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};
class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;
  const ARMBaseInstrInfo *TII;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  virtual const char *getPassName() const {
    return "ARM Instruction Selection";

  /// getI32Imm - Return a target constant of type i32 with the specified
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);

  SDNode *Select(SDNode *N);

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, A, B, C, false);

  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, A, B, false);
  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
    SelectAddrMode2Worker(N, Base, Offset, Opc);
    // return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.

  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
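  // Illustrative examples (not in the original source): 0xFF00 is a valid
  // ARM so_imm (0xFF rotated right by 24), while 0x12345 spans too many bits
  // to be encoded; 0x00FF00FF is a valid Thumb-2 modified immediate (byte
  // replication) even though it is not a valid ARM so_imm.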
  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics. NumVecs should be
  /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics. NumVecs should
  /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
  /// be 2, 3 or 4. The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          const uint16_t *DOpcodes, const uint16_t *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
  /// should be 2, 3 or 4. The opcode array specifies the instructions used
  /// for loading D registers. (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       const uint16_t *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
  /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  /// SelectCMOVOp - Select CMOV instructions for ARM.
  SDNode *SelectCMOVOp(SDNode *N);
  SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
  SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
  SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                            ARMCC::CondCodes CCVal, SDValue CCR,
  SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                             ARMCC::CondCodes CCVal, SDValue CCR,

  // Select special operations if node forms integer ABS pattern
  SDNode *SelectABSOp(SDNode *N);

  SDNode *SelectConcatVector(SDNode *N);

  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            std::vector<SDValue> &OutOps);

  // Form pairs of consecutive S, D, or Q registers.
  SDNode *PairSRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *QuadSRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where (N in [\p RangeMin, \p RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, int Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale > 0 && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
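// Illustrative (not part of the original source): with Node == 20, Scale == 4,
// RangeMin == 0 and RangeMax == 32, ScaledConstant becomes 5 and the check
// succeeds; with Node == 18 the divisibility test (18 % 4 != 0) fails.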
/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)

  if (!CheckVMLxHazard)

  if (!Subtarget->isCortexA8() && !Subtarget->isLikeA9())

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)

  if (Use->isMachineOpcode()) {
    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());

    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)

    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla (stall 8 cycles)
    // This adds up to about 18 - 19 cycles.
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);

bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
  if (!Subtarget->isLikeA9())

  if (Shift.hasOneUse())

  return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
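// Illustrative note (not in the original source): on an A9-like core a shift
// that has more than one use is only worth folding into the addressing mode
// when it is an LSL #2, the common scaled-index form [Rn, Rm, lsl #2]; any
// other reused shifter operand is left as a separate instruction.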
bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              bool CheckProfitability) {
  if (DisableShifterOp)

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),

bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              bool CheckProfitability) {
  if (DisableShifterOp)

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),

bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
  // Match simple R + imm12 operands.

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);

  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isLikeA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      ARM_AM::AddrOpc AddSub = ARM_AM::add;
        AddSub = ARM_AM::sub;
      if (isPowerOf2_32(RHSC)) {
        unsigned ShAmt = Log2_32(RHSC);
        Base = Offset = N.getOperand(0);
        Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());

  Base = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
        ShOpcVal = ARM_AM::no_shift;
      ShOpcVal = ARM_AM::no_shift;

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      if (ConstantSDNode *Sh =
             dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
          ShOpcVal = ARM_AM::no_shift;
        ShOpcVal = ARM_AM::no_shift;

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isLikeA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      ARM_AM::AddrOpc AddSub = ARM_AM::add;
        AddSub = ARM_AM::sub;
      if (isPowerOf2_32(RHSC)) {
        unsigned ShAmt = Log2_32(RHSC);
        Base = Offset = N.getOperand(0);
        Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);

    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
        AddSub = ARM_AM::sub;
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,

  if (Subtarget->isLikeA9() && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());

  Base = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
        ShOpcVal = ARM_AM::no_shift;
      ShOpcVal = ARM_AM::no_shift;

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      if (ConstantSDNode *Sh =
             dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
          ShOpcVal = ARM_AM::no_shift;
        ShOpcVal = ARM_AM::no_shift;

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),

bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
        ShOpcVal = ARM_AM::no_shift;
      ShOpcVal = ARM_AM::no_shift;

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                                  SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    if (AddSub == ARM_AM::sub) Val *= -1;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, MVT::i32);

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,

bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {

bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C, so there is no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);

  // If the RHS is +/- imm8, fold into addr mode.
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
      AddSub = ARM_AM::sub;
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);

bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);

bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),

  // If the RHS is +/- imm8, fold into addr mode.
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
                              -256 + 1, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
      AddSub = ARM_AM::sub;
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),

  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),

bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign >= MemSize && MemSize > 1)

    // All other uses of addrmode6 are for intrinsics. For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
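  // Illustrative (not part of the original source): a vld1.64 whose address
  // is known to be 8-byte aligned reaches this point with Alignment == 8; the
  // resulting target constant later constrains the alignment qualifier emitted
  // on the NEON address operand.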
bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)

  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),

//===----------------------------------------------------------------------===//
// Thumb Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset){
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())

  Base = N.getOperand(0);
  Offset = N.getOperand(1);

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  SDValue TmpBase, TmpOffImm;
  if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
    return false; // We want to select tLDRspi / tSTRspi instead.

  if (N.getOpcode() == ARMISD::Wrapper &&
      N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
    return false; // We want to select tLDRpci instead.

  if (!CurDAG->isBaseWithConstantOffset(N))

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))

  Base = N.getOperand(0);
  Offset = N.getOperand(1);

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
  return SelectThumbAddrModeRI(N, Base, Offset, 1);

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
  return SelectThumbAddrModeRI(N, Base, Offset, 2);

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
  return SelectThumbAddrModeRI(N, Base, Offset, 4);

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  SDValue TmpBase, TmpOffImm;
  if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
    return false; // We want to select tLDRspi / tSTRspi instead.

  if (N.getOpcode() == ARMISD::Wrapper &&
      N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
    return false; // We want to select tLDRpci instead.

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);

  // If the RHS is + imm5 * scale, fold into addr mode.
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);

bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);

  if (!CurDAG->isBaseWithConstantOffset(N))

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);

//===----------------------------------------------------------------------===//
// Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//
bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
  if (DisableShifterOp)

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));

bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false; // We want to select t2LDRpci instead.

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);

  OffImm = CurDAG->getTargetConstant(0, MVT::i32);

bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);

bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, MVT::i32);

bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
    else if (RHSC < 0 && RHSC >= -255) // 8 bits

  // Look for (R + R) or (R + (R << [1,2,3])).
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
        ShOpcVal = ARM_AM::no_shift;
      ShOpcVal = ARM_AM::no_shift;

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

//===--------------------------------------------------------------------===//

/// getAL - Returns a ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
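// Note (added, not in the original source): predicable ARM machine
// instructions carry a (condition code, CCR register) operand pair;
// throughout this file getAL(CurDAG) together with
// CurDAG->getRegister(0, MVT::i32) supplies the "always execute" predicate.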
SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;

  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
  } else if (LoadedVT == MVT::i32 && !isPre &&
             SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
  } else if (LoadedVT == MVT::i32 &&
             SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
                 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
                 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;

  if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
                                  MVT::i32, MVT::Other, Ops, 5);
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
                                  MVT::i32, MVT::Other, Ops, 6);

SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;

  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;

    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 5);
/// PairSRegs - Form a D register from a pair of S registers.
SDNode *ARMDAGToDAGISel::PairSRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
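// Illustrative (not part of the original source): a call such as
// PairSRegs(MVT::f64, S0, S1) builds a REG_SEQUENCE whose result must be
// allocated to a D register with ssub_0 == S0 and ssub_1 == S1; this is how
// consecutive-register constraints are expressed before register allocation.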
/// PairDRegs - Form a quad register from a pair of D registers.
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);

/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);

/// QuadSRegs - Form 4 consecutive S registers.
SDNode *ARMDAGToDAGISel::QuadSRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);

/// QuadDRegs - Form 4 consecutive D registers.
SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);

/// QuadQRegs - Form 4 consecutive Q registers.
SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);

/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction.  The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
  else if (Alignment >= 8)

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
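// Illustrative (not part of the original source): a 4-register VLD whose
// address is 32-byte aligned keeps an alignment operand of 32, a 2-register
// access at the same address is clamped to 16, and anything known to be less
// than 8-byte aligned carries no alignment guarantee at all.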
// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
  case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
  case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
  case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
  case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
  case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
  case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
  case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;

  case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
  case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
  case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
  case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
  case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
  case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
  case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
  case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
  case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
  case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;

  case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
  case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
  case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
  case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
  case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
  case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;

  case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
  case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
  case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
  case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
  case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
  case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;

  case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
  case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
  case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;

  return Opc; // If not one we handle, return it unchanged.
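// Illustrative (not part of the original source): VLD1d8wb_fixed, whose
// writeback increments the pointer by the fixed access size, is rewritten to
// VLD1d8wb_register when the post-increment amount is supplied in a
// general-purpose register instead of being the implicit fixed stride.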
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");

  unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);

  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
      // case entirely when the rest are updated to that form, too.
      if ((NumVecs == 1 || NumVecs == 2) && !isa<ConstantSDNode>(Inc.getNode()))
        Opc = getVLDSTRegisterUpdateOpcode(Opc);
      // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
      // check for that explicitly too. Horribly hacky, but temporary.
      if ((NumVecs != 1 && NumVecs != 2 && Opc != ARM::VLD1q64wb_fixed) ||
          !isa<ConstantSDNode>(Inc.getNode()))
        Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);

    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());

    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs.  This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA, 7);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      Ops.push_back(Reg0);
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                 Ops.data(), Ops.size());

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));

SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vst type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VST1");

  std::vector<EVT> ResTys;
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SmallVector<SDValue, 7> Ops;

  // Double registers and VST1/VST2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
      SrcReg = N->getOperand(Vec0Idx);
    } else if (is64BitVector) {
      // Form a REG_SEQUENCE to force register allocation.
      SDValue V0 = N->getOperand(Vec0Idx + 0);
      SDValue V1 = N->getOperand(Vec0Idx + 1);
        SrcReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
        SDValue V2 = N->getOperand(Vec0Idx + 2);
        // If it's a vst3, form a quad D-register and leave the last part as
        // an undef.
1787 SDValue V3 = (NumVecs == 3)
1788 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1789 : N->getOperand(Vec0Idx + 3);
1790 SrcReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
1793 // Form a QQ register.
1794 SDValue Q0 = N->getOperand(Vec0Idx);
1795 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1796 SrcReg = SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0);
1799 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1800 QOpcodes0[OpcodeIndex]);
1801 Ops.push_back(MemAddr);
1802 Ops.push_back(Align);
1804 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1805 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1806 // case entirely when the rest are updated to that form, too.
1807 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1808 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1809 // We use a VST1 for v1i64 even if the pseudo says vld2/3/4, so
1810 // check for that explicitly too. Horribly hacky, but temporary.
1811 if ((NumVecs > 2 && Opc != ARM::VST1q64wb_fixed) ||
1812 !isa<ConstantSDNode>(Inc.getNode()))
1813 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1815 Ops.push_back(SrcReg);
1816 Ops.push_back(Pred);
1817 Ops.push_back(Reg0);
1818 Ops.push_back(Chain);
1819 SDNode *VSt =
1820 CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
1822 // Transfer memoperands.
1823 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
1824 return VSt;
1825 }
1828 // Otherwise, quad registers are stored with two separate instructions,
1829 // where one stores the even registers and the other stores the odd registers.
1831 // Form the QQQQ REG_SEQUENCE.
1832 SDValue V0 = N->getOperand(Vec0Idx + 0);
1833 SDValue V1 = N->getOperand(Vec0Idx + 1);
1834 SDValue V2 = N->getOperand(Vec0Idx + 2);
1835 SDValue V3 = (NumVecs == 3)
1836 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1837 : N->getOperand(Vec0Idx + 3);
1838 SDValue RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
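// Both stores below source their D subregisters from this single QQQQ value:
// the first writes the even D registers and the second the odd ones.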
1840 // Store the even D registers. This is always an updating store, so that it
1841 // provides the address to the second store for the odd subregs.
1842 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
1843 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1844 MemAddr.getValueType(),
1845 MVT::Other, OpsA, 7);
1846 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
1847 Chain = SDValue(VStA, 1);
1849 // Store the odd D registers.
1850 Ops.push_back(SDValue(VStA, 0));
1851 Ops.push_back(Align);
1853 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1854 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1855 "only constant post-increment update allowed for VST3/4");
1857 Ops.push_back(Reg0);
1859 Ops.push_back(RegSeq);
1860 Ops.push_back(Pred);
1861 Ops.push_back(Reg0);
1862 Ops.push_back(Chain);
1863 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
1864 Ops.data(), Ops.size());
1865 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
1866 return VStB;
1867 }
1869 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
1870 bool isUpdating, unsigned NumVecs,
1871 const uint16_t *DOpcodes,
1872 const uint16_t *QOpcodes) {
1873 assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
1874 DebugLoc dl = N->getDebugLoc();
1876 SDValue MemAddr, Align;
1877 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1878 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1879 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1880 return NULL;
1882 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1883 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1885 SDValue Chain = N->getOperand(0);
1886 unsigned Lane =
1887 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
1888 EVT VT = N->getOperand(Vec0Idx).getValueType();
1889 bool is64BitVector = VT.is64BitVector();
1891 unsigned Alignment = 0;
1893 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1894 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
1895 if (Alignment > NumBytes)
1896 Alignment = NumBytes;
1897 if (Alignment < 8 && Alignment < NumBytes)
1898 Alignment = 0;
1899 // Alignment must be a power of two; make sure of that.
1900 Alignment = (Alignment & -Alignment);
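// (x & -x) keeps only the lowest set bit, so e.g. an alignment of 12 is
// rounded down to 4.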
1904 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
1906 unsigned OpcodeIndex;
1907 switch (VT.getSimpleVT().SimpleTy) {
1908 default: llvm_unreachable("unhandled vld/vst lane type");
1909 // Double-register operations:
1910 case MVT::v8i8: OpcodeIndex = 0; break;
1911 case MVT::v4i16: OpcodeIndex = 1; break;
1913 case MVT::v2i32: OpcodeIndex = 2; break;
1914 // Quad-register operations:
1915 case MVT::v8i16: OpcodeIndex = 0; break;
1917 case MVT::v4i32: OpcodeIndex = 1; break;
1920 std::vector<EVT> ResTys;
1921 if (IsLoad) {
1922 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1923 if (!is64BitVector)
1924 ResTyElts *= 2;
1925 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
1926 MVT::i64, ResTyElts));
1927 }
1928 if (isUpdating)
1929 ResTys.push_back(MVT::i32);
1930 ResTys.push_back(MVT::Other);
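// ResTys now lists the super-register vector result (when loading), the
// written-back address for updating forms, and the chain.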
1932 SDValue Pred = getAL(CurDAG);
1933 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1935 SmallVector<SDValue, 8> Ops;
1936 Ops.push_back(MemAddr);
1937 Ops.push_back(Align);
1938 if (isUpdating) {
1939 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1940 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1941 }
1943 SDValue SuperReg;
1944 SDValue V0 = N->getOperand(Vec0Idx + 0);
1945 SDValue V1 = N->getOperand(Vec0Idx + 1);
1946 if (NumVecs == 2) {
1947 if (is64BitVector)
1948 SuperReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
1949 else
1950 SuperReg = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
1951 } else {
1952 SDValue V2 = N->getOperand(Vec0Idx + 2);
1953 SDValue V3 = (NumVecs == 3)
1954 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1955 : N->getOperand(Vec0Idx + 3);
1956 if (is64BitVector)
1957 SuperReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
1958 else
1959 SuperReg = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
1960 }
1961 Ops.push_back(SuperReg);
1962 Ops.push_back(getI32Imm(Lane));
1963 Ops.push_back(Pred);
1964 Ops.push_back(Reg0);
1965 Ops.push_back(Chain);
1967 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1968 QOpcodes[OpcodeIndex]);
1969 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys,
1970 Ops.data(), Ops.size());
1971 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
1973 if (!IsLoad)
1974 return VLdLn;
1975 // Extract the subregisters.
1976 SuperReg = SDValue(VLdLn, 0);
1977 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1978 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1979 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
1980 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1981 ReplaceUses(SDValue(N, Vec),
1982 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
1983 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
1984 if (isUpdating)
1985 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
1986 return NULL;
1987 }
1989 SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
1990 unsigned NumVecs,
1991 const uint16_t *Opcodes) {
1992 assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
1993 DebugLoc dl = N->getDebugLoc();
1995 SDValue MemAddr, Align;
1996 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
1997 return NULL;
1999 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2000 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2002 SDValue Chain = N->getOperand(0);
2003 EVT VT = N->getValueType(0);
2005 unsigned Alignment = 0;
2007 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2008 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2009 if (Alignment > NumBytes)
2010 Alignment = NumBytes;
2011 if (Alignment < 8 && Alignment < NumBytes)
2012 Alignment = 0;
2013 // Alignment must be a power of two; make sure of that.
2014 Alignment = (Alignment & -Alignment);
2018 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2020 unsigned OpcodeIndex;
2021 switch (VT.getSimpleVT().SimpleTy) {
2022 default: llvm_unreachable("unhandled vld-dup type");
2023 case MVT::v8i8: OpcodeIndex = 0; break;
2024 case MVT::v4i16: OpcodeIndex = 1; break;
2026 case MVT::v2i32: OpcodeIndex = 2; break;
2029 SDValue Pred = getAL(CurDAG);
2030 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2032 unsigned Opc = Opcodes[OpcodeIndex];
2033 SmallVector<SDValue, 6> Ops;
2034 Ops.push_back(MemAddr);
2035 Ops.push_back(Align);
2036 if (isUpdating) {
2037 // Fixed-stride update instructions don't have an explicit writeback
2038 // operand. It's implicit in the opcode itself.
2039 SDValue Inc = N->getOperand(2);
2040 if (!isa<ConstantSDNode>(Inc.getNode()))
2041 Ops.push_back(Inc);
2042 // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
2043 else if (NumVecs > 2)
2044 Ops.push_back(Reg0);
2045 }
2046 Ops.push_back(Pred);
2047 Ops.push_back(Reg0);
2048 Ops.push_back(Chain);
2050 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
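// A 3-vector result is padded to four D registers so it can live in one QQ
// super-register; the extra subregister is simply never extracted.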
2051 std::vector<EVT> ResTys;
2052 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
2054 ResTys.push_back(MVT::i32);
2055 ResTys.push_back(MVT::Other);
2056 SDNode *VLdDup =
2057 CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
2058 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
2059 SuperReg = SDValue(VLdDup, 0);
2061 // Extract the subregisters.
2062 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2063 unsigned SubIdx = ARM::dsub_0;
2064 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2065 ReplaceUses(SDValue(N, Vec),
2066 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
2067 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
2068 if (isUpdating)
2069 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
2070 return NULL;
2071 }
2073 SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
2075 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2076 DebugLoc dl = N->getDebugLoc();
2077 EVT VT = N->getValueType(0);
2078 unsigned FirstTblReg = IsExt ? 2 : 1;
2080 // Form a REG_SEQUENCE to force register allocation.
2082 SDValue V0 = N->getOperand(FirstTblReg + 0);
2083 SDValue V1 = N->getOperand(FirstTblReg + 1);
2085 RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
2087 SDValue V2 = N->getOperand(FirstTblReg + 2);
2088 // If it's a vtbl3, form a quad D-register and leave the last part as
2089 // an undef.
2090 SDValue V3 = (NumVecs == 3)
2091 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2092 : N->getOperand(FirstTblReg + 3);
2093 RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
2096 SmallVector<SDValue, 6> Ops;
2098 Ops.push_back(N->getOperand(1));
2099 Ops.push_back(RegSeq);
2100 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2101 Ops.push_back(getAL(CurDAG)); // predicate
2102 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
2103 return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size());
2106 SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
2108 if (!Subtarget->hasV6T2Ops())
2109 return NULL;
2111 unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2112 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2115 // For unsigned extracts, check for a shift right and mask
2116 unsigned And_imm = 0;
2117 if (N->getOpcode() == ISD::AND) {
2118 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2120 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
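// e.g. 0x00ff passes (0x00ff & 0x0100 == 0) while 0x00f0 is rejected
// (0x00f0 & 0x00f1 != 0).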
2121 if (And_imm & (And_imm + 1))
2122 return NULL;
2124 unsigned Srl_imm = 0;
2125 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
2126 Srl_imm)) {
2127 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2129 // Note: The width operand is encoded as width-1.
2130 unsigned Width = CountTrailingOnes_32(And_imm) - 1;
2131 unsigned LSB = Srl_imm;
2132 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2133 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2134 CurDAG->getTargetConstant(LSB, MVT::i32),
2135 CurDAG->getTargetConstant(Width, MVT::i32),
2136 getAL(CurDAG), Reg0 };
2137 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2143 // Otherwise, we're looking for a shift of a shift
2144 unsigned Shl_imm = 0;
2145 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2146 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2147 unsigned Srl_imm = 0;
2148 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2149 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2150 // Note: The width operand is encoded as width-1.
2151 unsigned Width = 32 - Srl_imm - 1;
2152 int LSB = Srl_imm - Shl_imm;
2153 if (LSB < 0)
2154 return NULL;
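// e.g. for srl(shl(x, 8), 24) this extracts the 8-bit field at bit 16:
// LSB = 16 and Width holds the encoded value 7.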
2155 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2156 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2157 CurDAG->getTargetConstant(LSB, MVT::i32),
2158 CurDAG->getTargetConstant(Width, MVT::i32),
2159 getAL(CurDAG), Reg0 };
2160 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2166 SDNode *ARMDAGToDAGISel::
2167 SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2168 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2169 SDValue CPTmp0;
2170 SDValue CPTmp1;
2171 if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
2172 unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
2173 unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
2174 unsigned Opc = 0;
2175 switch (SOShOp) {
2176 case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
2177 case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
2178 case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
2179 case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
2180 default:
2181 llvm_unreachable("Unknown so_reg opcode!");
2182 }
2183 SDValue SOShImm =
2184 CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
2185 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2186 SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
2187 return CurDAG->SelectNodeTo(N, Opc, MVT::i32,Ops, 6);
2192 SDNode *ARMDAGToDAGISel::
2193 SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2194 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2195 SDValue CPTmp0;
2196 SDValue CPTmp1;
2197 SDValue CPTmp2;
2198 if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
2199 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2200 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, InFlag };
2201 return CurDAG->SelectNodeTo(N, ARM::MOVCCsi, MVT::i32, Ops, 6);
2204 if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
2205 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2206 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
2207 return CurDAG->SelectNodeTo(N, ARM::MOVCCsr, MVT::i32, Ops, 7);
2212 SDNode *ARMDAGToDAGISel::
2213 SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2214 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2215 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
2216 if (!T)
2217 return 0;
2219 unsigned Opc = 0;
2220 unsigned TrueImm = T->getZExtValue();
2221 if (is_t2_so_imm(TrueImm)) {
2222 Opc = ARM::t2MOVCCi;
2223 } else if (TrueImm <= 0xffff) {
2224 Opc = ARM::t2MOVCCi16;
2225 } else if (is_t2_so_imm_not(TrueImm)) {
2226 TrueImm = ~TrueImm;
2227 Opc = ARM::t2MVNCCi;
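// MVNCC writes the bitwise complement, so complementing TrueImm here yields
// the original value in the destination register.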
2228 } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
2230 Opc = ARM::t2MOVCCi32imm;
2234 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2235 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2236 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
2237 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2243 SDNode *ARMDAGToDAGISel::
2244 SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2245 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2246 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
2247 if (!T)
2248 return 0;
2250 unsigned Opc = 0;
2251 unsigned TrueImm = T->getZExtValue();
2252 bool isSoImm = is_so_imm(TrueImm);
2253 if (isSoImm) {
2254 Opc = ARM::MOVCCi;
2255 } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
2256 Opc = ARM::MOVCCi16;
2257 } else if (is_so_imm_not(TrueImm)) {
2258 TrueImm = ~TrueImm;
2259 Opc = ARM::MVNCCi;
2260 } else if (TrueVal.getNode()->hasOneUse() &&
2261 (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
2263 Opc = ARM::MOVCCi32imm;
2267 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2268 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2269 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
2270 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2276 SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
2277 EVT VT = N->getValueType(0);
2278 SDValue FalseVal = N->getOperand(0);
2279 SDValue TrueVal = N->getOperand(1);
2280 SDValue CC = N->getOperand(2);
2281 SDValue CCR = N->getOperand(3);
2282 SDValue InFlag = N->getOperand(4);
2283 assert(CC.getOpcode() == ISD::Constant);
2284 assert(CCR.getOpcode() == ISD::Register);
2285 ARMCC::CondCodes CCVal =
2286 (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();
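// Each helper below is tried twice: once as-is, and once with the operands
// swapped and the condition inverted, so either arm of the select may supply
// the immediate or shifter operand.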
2288 if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
2289 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2290 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2291 // Pattern complexity = 18 cost = 1 size = 0
2292 if (Subtarget->isThumb()) {
2293 SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
2294 CCVal, CCR, InFlag);
2295 if (!Res)
2296 Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
2297 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2298 if (Res)
2299 return Res;
2300 } else {
2301 SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
2302 CCVal, CCR, InFlag);
2303 if (!Res)
2304 Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
2305 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2306 if (Res)
2307 return Res;
2308 }
2310 // Pattern: (ARMcmov:i32 GPR:i32:$false,
2311 // (imm:i32)<<P:Pred_so_imm>>:$true,
2313 // Emits: (MOVCCi:i32 GPR:i32:$false,
2314 // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
2315 // Pattern complexity = 10 cost = 1 size = 0
2316 if (Subtarget->isThumb()) {
2317 SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
2318 CCVal, CCR, InFlag);
2319 if (!Res)
2320 Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
2321 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2322 if (Res)
2323 return Res;
2324 } else {
2325 SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
2326 CCVal, CCR, InFlag);
2327 if (!Res)
2328 Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
2329 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2330 if (Res)
2331 return Res;
2332 }
2335 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2336 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2337 // Pattern complexity = 6 cost = 1 size = 0
2339 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2340 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2341 // Pattern complexity = 6 cost = 11 size = 0
2343 // Also VMOVScc and VMOVDcc.
2344 SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
2345 SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
2346 unsigned Opc = 0;
2347 switch (VT.getSimpleVT().SimpleTy) {
2348 default: llvm_unreachable("Illegal conditional move type!");
2349 case MVT::i32:
2350 Opc = Subtarget->isThumb()
2351 ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
2352 : ARM::MOVCCr;
2353 break;
2354 case MVT::f32: Opc = ARM::VMOVScc; break;
2355 case MVT::f64: Opc = ARM::VMOVDcc; break;
2360 }
2361 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
2362 }
2364 /// Target-specific DAG combining for ISD::XOR.
2365 /// Target-independent combining lowers SELECT_CC nodes of the form
2366 /// select_cc setg[ge] X, 0, X, -X
2367 /// select_cc setgt X, -1, X, -X
2368 /// select_cc setl[te] X, 0, -X, X
2369 /// select_cc setlt X, 1, -X, X
2370 /// which represent Integer ABS into:
2371 /// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2372 /// ARM instruction selection detects the latter and matches it to
2373 /// ARM::ABS or ARM::t2ABS machine node.
2374 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
2375 SDValue XORSrc0 = N->getOperand(0);
2376 SDValue XORSrc1 = N->getOperand(1);
2377 EVT VT = N->getValueType(0);
2379 if (Subtarget->isThumb1Only())
2380 return NULL;
2382 if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
2383 return NULL;
2385 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2386 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2387 SDValue SRASrc0 = XORSrc1.getOperand(0);
2388 SDValue SRASrc1 = XORSrc1.getOperand(1);
2389 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2390 EVT XType = SRASrc0.getValueType();
2391 unsigned Size = XType.getSizeInBits() - 1;
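// For i32, Y = sra(X, 31) is all ones exactly when X is negative, so
// (X + Y) ^ Y conditionally negates X and yields |X|.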
2393 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
2394 XType.isInteger() && SRAConstant != NULL &&
2395 Size == SRAConstant->getZExtValue()) {
2396 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
2397 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
2398 }
2399 return NULL;
2400 }
2403 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2404 // The only time a CONCAT_VECTORS operation can have legal types is when
2405 // two 64-bit vectors are concatenated to a 128-bit vector.
2406 EVT VT = N->getValueType(0);
2407 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2408 llvm_unreachable("unexpected CONCAT_VECTORS");
2409 return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
2412 SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
2413 SmallVector<SDValue, 6> Ops;
2414 Ops.push_back(Node->getOperand(1)); // Ptr
2415 Ops.push_back(Node->getOperand(2)); // Low part of Val1
2416 Ops.push_back(Node->getOperand(3)); // High part of Val1
2417 if (Opc == ARM::ATOMCMPXCHG6432) {
2418 Ops.push_back(Node->getOperand(4)); // Low part of Val2
2419 Ops.push_back(Node->getOperand(5)); // High part of Val2
2421 Ops.push_back(Node->getOperand(0)); // Chain
2422 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2423 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
2424 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
2425 MVT::i32, MVT::i32, MVT::Other,
2426 Ops.data() ,Ops.size());
2427 cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
2428 return ResNode;
2429 }
2431 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2432 DebugLoc dl = N->getDebugLoc();
2434 if (N->isMachineOpcode())
2435 return NULL; // Already selected.
2437 switch (N->getOpcode()) {
2438 default: break;
2439 case ISD::XOR: {
2440 // Select special operations if XOR node forms integer ABS pattern
2441 SDNode *ResNode = SelectABSOp(N);
2442 if (ResNode)
2443 return ResNode;
2444 // Other cases are autogenerated.
2445 break;
2446 }
2447 case ISD::Constant: {
2448 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2449 bool UseCP = true;
2450 if (Subtarget->hasThumb2())
2451 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2452 // be done with MOV + MOVT, at worst.
2455 if (Subtarget->isThumb()) {
2456 UseCP = (Val > 255 && // MOV
2457 ~Val > 255 && // MOV + MVN
2458 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
2460 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
2461 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2462 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
2466 SDValue CPIdx =
2467 CurDAG->getTargetConstantPool(ConstantInt::get(
2468 Type::getInt32Ty(*CurDAG->getContext()), Val),
2469 TLI.getPointerTy());
2471 SDNode *ResNode;
2472 if (Subtarget->isThumb1Only()) {
2473 SDValue Pred = getAL(CurDAG);
2474 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2475 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2476 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2481 CurDAG->getTargetConstant(0, MVT::i32),
2483 CurDAG->getRegister(0, MVT::i32),
2484 CurDAG->getEntryNode()
2486 ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2489 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2493 // Other cases are autogenerated.
2494 break;
2495 }
2496 case ISD::FrameIndex: {
2497 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2498 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2499 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
2500 if (Subtarget->isThumb1Only()) {
2501 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2502 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2503 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
2505 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2506 ARM::t2ADDri : ARM::ADDri);
2507 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2508 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2509 CurDAG->getRegister(0, MVT::i32) };
2510 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2513 case ISD::SRL:
2514 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2515 return I;
2516 break;
2517 case ISD::SRA:
2518 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2519 return I;
2520 break;
2521 case ISD::MUL:
2522 if (Subtarget->isThumb1Only())
2523 break;
2524 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2525 unsigned RHSV = C->getZExtValue();
2527 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2528 unsigned ShImm = Log2_32(RHSV-1);
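// x * (2^n + 1) == x + (x << n), e.g. x * 9 becomes add x, x, lsl #3.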
2531 SDValue V = N->getOperand(0);
2532 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2533 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2534 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2535 if (Subtarget->isThumb()) {
2536 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2537 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
2539 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2540 return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
2543 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2544 unsigned ShImm = Log2_32(RHSV+1);
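// x * (2^n - 1) == (x << n) - x, e.g. x * 7 becomes rsb x, x, lsl #3.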
2547 SDValue V = N->getOperand(0);
2548 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2549 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2550 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2551 if (Subtarget->isThumb()) {
2552 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2553 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
2555 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2556 return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
2561 case ISD::AND: {
2562 // Check for unsigned bitfield extract
2563 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2564 return I;
2566 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
2567 // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits
2568 // are entirely contributed by c2 and lower 16-bits are entirely contributed
2569 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
2570 // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
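// e.g. c1 = 0x1234ffff and c2 = 0x12340000 yield "movt x, #0x1234".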
2571 EVT VT = N->getValueType(0);
2572 if (VT != MVT::i32)
2573 break;
2574 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2575 ? ARM::t2MOVTi16
2576 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2577 if (!Opc)
2578 break;
2579 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2580 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2581 if (!N1C)
2582 break;
2583 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2584 SDValue N2 = N0.getOperand(1);
2585 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2586 if (!N2C)
2587 break;
2588 unsigned N1CVal = N1C->getZExtValue();
2589 unsigned N2CVal = N2C->getZExtValue();
2590 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2591 (N1CVal & 0xffffU) == 0xffffU &&
2592 (N2CVal & 0xffffU) == 0x0U) {
2593 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2594 MVT::i32);
2595 SDValue Ops[] = { N0.getOperand(0), Imm16,
2596 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2597 return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4);
2602 case ARMISD::VMOVRRD:
2603 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2604 N->getOperand(0), getAL(CurDAG),
2605 CurDAG->getRegister(0, MVT::i32));
2606 case ISD::UMUL_LOHI: {
2607 if (Subtarget->isThumb1Only())
2609 if (Subtarget->isThumb()) {
2610 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2611 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2612 CurDAG->getRegister(0, MVT::i32) };
2613 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32,Ops,4);
2615 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2616 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2617 CurDAG->getRegister(0, MVT::i32) };
2618 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2619 ARM::UMULL : ARM::UMULLv5,
2620 dl, MVT::i32, MVT::i32, Ops, 5);
2623 case ISD::SMUL_LOHI: {
2624 if (Subtarget->isThumb1Only())
2626 if (Subtarget->isThumb()) {
2627 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2628 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2629 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32,Ops,4);
2631 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2632 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2633 CurDAG->getRegister(0, MVT::i32) };
2634 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2635 ARM::SMULL : ARM::SMULLv5,
2636 dl, MVT::i32, MVT::i32, Ops, 5);
2639 case ARMISD::UMLAL:{
2640 if (Subtarget->isThumb()) {
2641 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2642 N->getOperand(3), getAL(CurDAG),
2643 CurDAG->getRegister(0, MVT::i32)};
2644 return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops, 6);
2646 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2647 N->getOperand(3), getAL(CurDAG),
2648 CurDAG->getRegister(0, MVT::i32),
2649 CurDAG->getRegister(0, MVT::i32) };
2650 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2651 ARM::UMLAL : ARM::UMLALv5,
2652 dl, MVT::i32, MVT::i32, Ops, 7);
2655 case ARMISD::SMLAL:{
2656 if (Subtarget->isThumb()) {
2657 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2658 N->getOperand(3), getAL(CurDAG),
2659 CurDAG->getRegister(0, MVT::i32)};
2660 return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops, 6);
2662 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2663 N->getOperand(3), getAL(CurDAG),
2664 CurDAG->getRegister(0, MVT::i32),
2665 CurDAG->getRegister(0, MVT::i32) };
2666 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2667 ARM::SMLAL : ARM::SMLALv5,
2668 dl, MVT::i32, MVT::i32, Ops, 7);
2669 }
2670 case ISD::LOAD: {
2672 SDNode *ResNode = 0;
2673 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2674 ResNode = SelectT2IndexedLoad(N);
2675 else
2676 ResNode = SelectARMIndexedLoad(N);
2677 if (ResNode)
2678 return ResNode;
2679 // Other cases are autogenerated.
2680 break;
2681 }
2682 case ARMISD::BRCOND: {
2683 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2684 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2685 // Pattern complexity = 6 cost = 1 size = 0
2687 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2688 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2689 // Pattern complexity = 6 cost = 1 size = 0
2691 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2692 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2693 // Pattern complexity = 6 cost = 1 size = 0
2695 unsigned Opc = Subtarget->isThumb() ?
2696 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2697 SDValue Chain = N->getOperand(0);
2698 SDValue N1 = N->getOperand(1);
2699 SDValue N2 = N->getOperand(2);
2700 SDValue N3 = N->getOperand(3);
2701 SDValue InFlag = N->getOperand(4);
2702 assert(N1.getOpcode() == ISD::BasicBlock);
2703 assert(N2.getOpcode() == ISD::Constant);
2704 assert(N3.getOpcode() == ISD::Register);
2706 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2707 cast<ConstantSDNode>(N2)->getZExtValue()),
2708 MVT::i32);
2709 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2710 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2711 MVT::Glue, Ops, 5);
2712 Chain = SDValue(ResNode, 0);
2713 if (N->getNumValues() == 2) {
2714 InFlag = SDValue(ResNode, 1);
2715 ReplaceUses(SDValue(N, 1), InFlag);
2717 ReplaceUses(SDValue(N, 0),
2718 SDValue(Chain.getNode(), Chain.getResNo()));
2719 return NULL;
2720 }
2721 case ARMISD::CMOV:
2722 return SelectCMOVOp(N);
2723 case ARMISD::VZIP: {
2724 unsigned Opc = 0;
2725 EVT VT = N->getValueType(0);
2726 switch (VT.getSimpleVT().SimpleTy) {
2727 default: return NULL;
2728 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2729 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2731 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2732 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2733 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2734 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2736 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2738 SDValue Pred = getAL(CurDAG);
2739 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2740 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2741 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2743 case ARMISD::VUZP: {
2744 unsigned Opc = 0;
2745 EVT VT = N->getValueType(0);
2746 switch (VT.getSimpleVT().SimpleTy) {
2747 default: return NULL;
2748 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2749 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2751 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2752 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2753 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2754 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2756 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2758 SDValue Pred = getAL(CurDAG);
2759 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2760 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2761 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2763 case ARMISD::VTRN: {
2764 unsigned Opc = 0;
2765 EVT VT = N->getValueType(0);
2766 switch (VT.getSimpleVT().SimpleTy) {
2767 default: return NULL;
2768 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2769 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2771 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2772 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2773 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2775 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2777 SDValue Pred = getAL(CurDAG);
2778 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2779 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2780 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2782 case ARMISD::BUILD_VECTOR: {
2783 EVT VecVT = N->getValueType(0);
2784 EVT EltVT = VecVT.getVectorElementType();
2785 unsigned NumElts = VecVT.getVectorNumElements();
2786 if (EltVT == MVT::f64) {
2787 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2788 return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
2790 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2791 if (NumElts == 2)
2792 return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
2793 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2794 return QuadSRegs(VecVT, N->getOperand(0), N->getOperand(1),
2795 N->getOperand(2), N->getOperand(3));
2798 case ARMISD::VLD2DUP: {
2799 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
2800 ARM::VLD2DUPd32 };
2801 return SelectVLDDup(N, false, 2, Opcodes);
2804 case ARMISD::VLD3DUP: {
2805 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2806 ARM::VLD3DUPd16Pseudo,
2807 ARM::VLD3DUPd32Pseudo };
2808 return SelectVLDDup(N, false, 3, Opcodes);
2811 case ARMISD::VLD4DUP: {
2812 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2813 ARM::VLD4DUPd16Pseudo,
2814 ARM::VLD4DUPd32Pseudo };
2815 return SelectVLDDup(N, false, 4, Opcodes);
2818 case ARMISD::VLD2DUP_UPD: {
2819 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2820 ARM::VLD2DUPd16wb_fixed,
2821 ARM::VLD2DUPd32wb_fixed };
2822 return SelectVLDDup(N, true, 2, Opcodes);
2825 case ARMISD::VLD3DUP_UPD: {
2826 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2827 ARM::VLD3DUPd16Pseudo_UPD,
2828 ARM::VLD3DUPd32Pseudo_UPD };
2829 return SelectVLDDup(N, true, 3, Opcodes);
2832 case ARMISD::VLD4DUP_UPD: {
2833 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2834 ARM::VLD4DUPd16Pseudo_UPD,
2835 ARM::VLD4DUPd32Pseudo_UPD };
2836 return SelectVLDDup(N, true, 4, Opcodes);
2839 case ARMISD::VLD1_UPD: {
2840 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2841 ARM::VLD1d16wb_fixed,
2842 ARM::VLD1d32wb_fixed,
2843 ARM::VLD1d64wb_fixed };
2844 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2845 ARM::VLD1q16wb_fixed,
2846 ARM::VLD1q32wb_fixed,
2847 ARM::VLD1q64wb_fixed };
2848 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
2851 case ARMISD::VLD2_UPD: {
2852 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2853 ARM::VLD2d16wb_fixed,
2854 ARM::VLD2d32wb_fixed,
2855 ARM::VLD1q64wb_fixed};
2856 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2857 ARM::VLD2q16PseudoWB_fixed,
2858 ARM::VLD2q32PseudoWB_fixed };
2859 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
2862 case ARMISD::VLD3_UPD: {
2863 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2864 ARM::VLD3d16Pseudo_UPD,
2865 ARM::VLD3d32Pseudo_UPD,
2866 ARM::VLD1q64wb_fixed};
2867 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2868 ARM::VLD3q16Pseudo_UPD,
2869 ARM::VLD3q32Pseudo_UPD };
2870 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2871 ARM::VLD3q16oddPseudo_UPD,
2872 ARM::VLD3q32oddPseudo_UPD };
2873 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2876 case ARMISD::VLD4_UPD: {
2877 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
2878 ARM::VLD4d16Pseudo_UPD,
2879 ARM::VLD4d32Pseudo_UPD,
2880 ARM::VLD1q64wb_fixed};
2881 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2882 ARM::VLD4q16Pseudo_UPD,
2883 ARM::VLD4q32Pseudo_UPD };
2884 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2885 ARM::VLD4q16oddPseudo_UPD,
2886 ARM::VLD4q32oddPseudo_UPD };
2887 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2890 case ARMISD::VLD2LN_UPD: {
2891 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
2892 ARM::VLD2LNd16Pseudo_UPD,
2893 ARM::VLD2LNd32Pseudo_UPD };
2894 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2895 ARM::VLD2LNq32Pseudo_UPD };
2896 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2899 case ARMISD::VLD3LN_UPD: {
2900 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
2901 ARM::VLD3LNd16Pseudo_UPD,
2902 ARM::VLD3LNd32Pseudo_UPD };
2903 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
2904 ARM::VLD3LNq32Pseudo_UPD };
2905 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
2908 case ARMISD::VLD4LN_UPD: {
2909 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
2910 ARM::VLD4LNd16Pseudo_UPD,
2911 ARM::VLD4LNd32Pseudo_UPD };
2912 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
2913 ARM::VLD4LNq32Pseudo_UPD };
2914 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
2917 case ARMISD::VST1_UPD: {
2918 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
2919 ARM::VST1d16wb_fixed,
2920 ARM::VST1d32wb_fixed,
2921 ARM::VST1d64wb_fixed };
2922 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
2923 ARM::VST1q16wb_fixed,
2924 ARM::VST1q32wb_fixed,
2925 ARM::VST1q64wb_fixed };
2926 return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
2929 case ARMISD::VST2_UPD: {
2930 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
2931 ARM::VST2d16wb_fixed,
2932 ARM::VST2d32wb_fixed,
2933 ARM::VST1q64wb_fixed};
2934 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
2935 ARM::VST2q16PseudoWB_fixed,
2936 ARM::VST2q32PseudoWB_fixed };
2937 return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
2940 case ARMISD::VST3_UPD: {
2941 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
2942 ARM::VST3d16Pseudo_UPD,
2943 ARM::VST3d32Pseudo_UPD,
2944 ARM::VST1d64TPseudoWB_fixed};
2945 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
2946 ARM::VST3q16Pseudo_UPD,
2947 ARM::VST3q32Pseudo_UPD };
2948 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
2949 ARM::VST3q16oddPseudo_UPD,
2950 ARM::VST3q32oddPseudo_UPD };
2951 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2954 case ARMISD::VST4_UPD: {
2955 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
2956 ARM::VST4d16Pseudo_UPD,
2957 ARM::VST4d32Pseudo_UPD,
2958 ARM::VST1d64QPseudoWB_fixed};
2959 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
2960 ARM::VST4q16Pseudo_UPD,
2961 ARM::VST4q32Pseudo_UPD };
2962 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
2963 ARM::VST4q16oddPseudo_UPD,
2964 ARM::VST4q32oddPseudo_UPD };
2965 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2968 case ARMISD::VST2LN_UPD: {
2969 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
2970 ARM::VST2LNd16Pseudo_UPD,
2971 ARM::VST2LNd32Pseudo_UPD };
2972 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
2973 ARM::VST2LNq32Pseudo_UPD };
2974 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
2977 case ARMISD::VST3LN_UPD: {
2978 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
2979 ARM::VST3LNd16Pseudo_UPD,
2980 ARM::VST3LNd32Pseudo_UPD };
2981 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
2982 ARM::VST3LNq32Pseudo_UPD };
2983 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
2986 case ARMISD::VST4LN_UPD: {
2987 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
2988 ARM::VST4LNd16Pseudo_UPD,
2989 ARM::VST4LNd32Pseudo_UPD };
2990 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
2991 ARM::VST4LNq32Pseudo_UPD };
2992 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
2995 case ISD::INTRINSIC_VOID:
2996 case ISD::INTRINSIC_W_CHAIN: {
2997 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2998 switch (IntNo) {
2999 default:
3000 break;
3002 case Intrinsic::arm_ldrexd: {
3003 SDValue MemAddr = N->getOperand(2);
3004 DebugLoc dl = N->getDebugLoc();
3005 SDValue Chain = N->getOperand(0);
3007 unsigned NewOpc = ARM::LDREXD;
3008 if (Subtarget->isThumb() && Subtarget->hasThumb2())
3009 NewOpc = ARM::t2LDREXD;
3011 // arm_ldrexd returns an i64 value in {i32, i32}.
3012 std::vector<EVT> ResTys;
3013 ResTys.push_back(MVT::i32);
3014 ResTys.push_back(MVT::i32);
3015 ResTys.push_back(MVT::Other);
3017 // place arguments in the right order
3018 SmallVector<SDValue, 7> Ops;
3019 Ops.push_back(MemAddr);
3020 Ops.push_back(getAL(CurDAG));
3021 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3022 Ops.push_back(Chain);
3023 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
3024 Ops.size());
3025 // Transfer memoperands.
3026 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3027 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3028 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
3030 // Until there's support for specifying explicit register constraints
3031 // like the use of even/odd register pair, hardcode ldrexd to always
3032 // use the pair [R0, R1] to hold the load result.
3033 Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R0,
3034 SDValue(Ld, 0), SDValue(0,0));
3035 Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R1,
3036 SDValue(Ld, 1), Chain.getValue(1));
3039 SDValue Glue = Chain.getValue(1);
3040 if (!SDValue(N, 0).use_empty()) {
3041 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3042 ARM::R0, MVT::i32, Glue);
3043 Glue = Result.getValue(2);
3044 ReplaceUses(SDValue(N, 0), Result);
3046 if (!SDValue(N, 1).use_empty()) {
3047 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3048 ARM::R1, MVT::i32, Glue);
3049 Glue = Result.getValue(2);
3050 ReplaceUses(SDValue(N, 1), Result);
3053 ReplaceUses(SDValue(N, 2), SDValue(Ld, 2));
3054 return NULL;
3055 }
3057 case Intrinsic::arm_strexd: {
3058 DebugLoc dl = N->getDebugLoc();
3059 SDValue Chain = N->getOperand(0);
3060 SDValue Val0 = N->getOperand(2);
3061 SDValue Val1 = N->getOperand(3);
3062 SDValue MemAddr = N->getOperand(4);
3064 // Until there's support for specifying explicit register constraints
3065 // like the use of even/odd register pair, hardcode strexd to always
3066 // use the pair [R2, R3] to hold the i64 (i32, i32) value to be stored.
3067 Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R2, Val0,
3069 Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R3, Val1, Chain.getValue(1));
3071 SDValue Glue = Chain.getValue(1);
3072 Val0 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3073 ARM::R2, MVT::i32, Glue);
3074 Glue = Val0.getValue(1);
3075 Val1 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3076 ARM::R3, MVT::i32, Glue);
3078 // Store exclusive double returns an i32 value which is the return status
3079 // of the issued store.
3080 std::vector<EVT> ResTys;
3081 ResTys.push_back(MVT::i32);
3082 ResTys.push_back(MVT::Other);
3084 // place arguments in the right order
3085 SmallVector<SDValue, 7> Ops;
3086 Ops.push_back(Val0);
3087 Ops.push_back(Val1);
3088 Ops.push_back(MemAddr);
3089 Ops.push_back(getAL(CurDAG));
3090 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3091 Ops.push_back(Chain);
3093 unsigned NewOpc = ARM::STREXD;
3094 if (Subtarget->isThumb() && Subtarget->hasThumb2())
3095 NewOpc = ARM::t2STREXD;
3097 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
3098 Ops.size());
3099 // Transfer memoperands.
3100 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3101 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3102 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
3104 return St;
3105 }
3107 case Intrinsic::arm_neon_vld1: {
3108 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3109 ARM::VLD1d32, ARM::VLD1d64 };
3110 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3111 ARM::VLD1q32, ARM::VLD1q64};
3112 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
3115 case Intrinsic::arm_neon_vld2: {
3116 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3117 ARM::VLD2d32, ARM::VLD1q64 };
3118 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3119 ARM::VLD2q32Pseudo };
3120 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
3123 case Intrinsic::arm_neon_vld3: {
3124 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
3125 ARM::VLD3d16Pseudo,
3126 ARM::VLD3d32Pseudo,
3127 ARM::VLD1d64TPseudo };
3128 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3129 ARM::VLD3q16Pseudo_UPD,
3130 ARM::VLD3q32Pseudo_UPD };
3131 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3132 ARM::VLD3q16oddPseudo,
3133 ARM::VLD3q32oddPseudo };
3134 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3137 case Intrinsic::arm_neon_vld4: {
3138 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
3139 ARM::VLD4d16Pseudo,
3140 ARM::VLD4d32Pseudo,
3141 ARM::VLD1d64QPseudo };
3142 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3143 ARM::VLD4q16Pseudo_UPD,
3144 ARM::VLD4q32Pseudo_UPD };
3145 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3146 ARM::VLD4q16oddPseudo,
3147 ARM::VLD4q32oddPseudo };
3148 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3151 case Intrinsic::arm_neon_vld2lane: {
3152 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3153 ARM::VLD2LNd16Pseudo,
3154 ARM::VLD2LNd32Pseudo };
3155 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3156 ARM::VLD2LNq32Pseudo };
3157 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3160 case Intrinsic::arm_neon_vld3lane: {
3161 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3162 ARM::VLD3LNd16Pseudo,
3163 ARM::VLD3LNd32Pseudo };
3164 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3165 ARM::VLD3LNq32Pseudo };
3166 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3169 case Intrinsic::arm_neon_vld4lane: {
3170 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3171 ARM::VLD4LNd16Pseudo,
3172 ARM::VLD4LNd32Pseudo };
3173 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3174 ARM::VLD4LNq32Pseudo };
3175 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3178 case Intrinsic::arm_neon_vst1: {
3179 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3180 ARM::VST1d32, ARM::VST1d64 };
3181 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3182 ARM::VST1q32, ARM::VST1q64 };
3183 return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
3186 case Intrinsic::arm_neon_vst2: {
3187 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3188 ARM::VST2d32, ARM::VST1q64 };
3189 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
3190 ARM::VST2q32Pseudo };
3191 return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
3194 case Intrinsic::arm_neon_vst3: {
3195 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
3196 ARM::VST3d16Pseudo,
3197 ARM::VST3d32Pseudo,
3198 ARM::VST1d64TPseudo };
3199 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3200 ARM::VST3q16Pseudo_UPD,
3201 ARM::VST3q32Pseudo_UPD };
3202 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3203 ARM::VST3q16oddPseudo,
3204 ARM::VST3q32oddPseudo };
3205 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3208 case Intrinsic::arm_neon_vst4: {
3209 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
3210 ARM::VST4d16Pseudo,
3211 ARM::VST4d32Pseudo,
3212 ARM::VST1d64QPseudo };
3213 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3214 ARM::VST4q16Pseudo_UPD,
3215 ARM::VST4q32Pseudo_UPD };
3216 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3217 ARM::VST4q16oddPseudo,
3218 ARM::VST4q32oddPseudo };
3219 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3222 case Intrinsic::arm_neon_vst2lane: {
3223 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3224 ARM::VST2LNd16Pseudo,
3225 ARM::VST2LNd32Pseudo };
3226 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3227 ARM::VST2LNq32Pseudo };
3228 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3231 case Intrinsic::arm_neon_vst3lane: {
3232 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3233 ARM::VST3LNd16Pseudo,
3234 ARM::VST3LNd32Pseudo };
3235 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3236 ARM::VST3LNq32Pseudo };
3237 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3240 case Intrinsic::arm_neon_vst4lane: {
3241 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3242 ARM::VST4LNd16Pseudo,
3243 ARM::VST4LNd32Pseudo };
3244 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3245 ARM::VST4LNq32Pseudo };
3246 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
3252 case ISD::INTRINSIC_WO_CHAIN: {
3253 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3254 switch (IntNo) {
3255 default:
3256 break;
3258 case Intrinsic::arm_neon_vtbl2:
3259 return SelectVTBL(N, false, 2, ARM::VTBL2);
3260 case Intrinsic::arm_neon_vtbl3:
3261 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3262 case Intrinsic::arm_neon_vtbl4:
3263 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3265 case Intrinsic::arm_neon_vtbx2:
3266 return SelectVTBL(N, true, 2, ARM::VTBX2);
3267 case Intrinsic::arm_neon_vtbx3:
3268 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3269 case Intrinsic::arm_neon_vtbx4:
3270 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
3275 case ARMISD::VTBL1: {
3276 DebugLoc dl = N->getDebugLoc();
3277 EVT VT = N->getValueType(0);
3278 SmallVector<SDValue, 6> Ops;
3280 Ops.push_back(N->getOperand(0));
3281 Ops.push_back(N->getOperand(1));
3282 Ops.push_back(getAL(CurDAG)); // Predicate
3283 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3284 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops.data(), Ops.size());
3286 case ARMISD::VTBL2: {
3287 DebugLoc dl = N->getDebugLoc();
3288 EVT VT = N->getValueType(0);
3290 // Form a REG_SEQUENCE to force register allocation.
3291 SDValue V0 = N->getOperand(0);
3292 SDValue V1 = N->getOperand(1);
3293 SDValue RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
3295 SmallVector<SDValue, 6> Ops;
3296 Ops.push_back(RegSeq);
3297 Ops.push_back(N->getOperand(2));
3298 Ops.push_back(getAL(CurDAG)); // Predicate
3299 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3300 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT,
3301 Ops.data(), Ops.size());
3304 case ISD::CONCAT_VECTORS:
3305 return SelectConcatVector(N);
3307 case ARMISD::ATOMOR64_DAG:
3308 return SelectAtomic64(N, ARM::ATOMOR6432);
3309 case ARMISD::ATOMXOR64_DAG:
3310 return SelectAtomic64(N, ARM::ATOMXOR6432);
3311 case ARMISD::ATOMADD64_DAG:
3312 return SelectAtomic64(N, ARM::ATOMADD6432);
3313 case ARMISD::ATOMSUB64_DAG:
3314 return SelectAtomic64(N, ARM::ATOMSUB6432);
3315 case ARMISD::ATOMNAND64_DAG:
3316 return SelectAtomic64(N, ARM::ATOMNAND6432);
3317 case ARMISD::ATOMAND64_DAG:
3318 return SelectAtomic64(N, ARM::ATOMAND6432);
3319 case ARMISD::ATOMSWAP64_DAG:
3320 return SelectAtomic64(N, ARM::ATOMSWAP6432);
3321 case ARMISD::ATOMCMPXCHG64_DAG:
3322 return SelectAtomic64(N, ARM::ATOMCMPXCHG6432);
3325 return SelectCode(N);
3328 bool ARMDAGToDAGISel::
3329 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
3330 std::vector<SDValue> &OutOps) {
3331 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
3332 // Require the address to be in a register. That is safe for all ARM
3333 // variants and it is hard to do anything much smarter without knowing
3334 // how the operand is used.
3335 OutOps.push_back(Op);
3336 return false;
3337 }
3339 /// createARMISelDag - This pass converts a legalized DAG into a
3340 /// ARM-specific DAG, ready for instruction scheduling.
3342 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3343 CodeGenOpt::Level OptLevel) {
3344 return new ARMDAGToDAGISel(TM, OptLevel);
3345 }